drm/i915: Check for invalid cloning earlier during modeset
[cascardo/linux.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_gem_dmabuf.h"
40 #include "intel_dsi.h"
41 #include "i915_trace.h"
42 #include <drm/drm_atomic.h>
43 #include <drm/drm_atomic_helper.h>
44 #include <drm/drm_dp_helper.h>
45 #include <drm/drm_crtc_helper.h>
46 #include <drm/drm_plane_helper.h>
47 #include <drm/drm_rect.h>
48 #include <linux/dma_remapping.h>
49 #include <linux/reservation.h>
50
51 static bool is_mmio_work(struct intel_flip_work *work)
52 {
53         return work->mmio_work.func;
54 }
55
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4; adds 10bpc XRGB/XBGR variants */
static const uint32_t i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for SKL+; adds per-pixel alpha and packed YUV */
static const uint32_t skl_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};
93
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
        struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static int bxt_calc_cdclk(int max_pixclk);
/*
 * Valid ranges for the DPLL dividers and derived clocks of one
 * platform/output combination.  Consumed by intel_PLL_is_valid() and the
 * *_find_best_dpll() search helpers below.  dot/vco are in kHz; the other
 * fields are raw divider value ranges.
 */
struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        struct {
                int dot_limit;          /* kHz threshold selecting slow vs fast p2 */
                int p2_slow, p2_fast;
        } p2;
};
138
139 /* returns HPLL frequency in kHz */
140 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
141 {
142         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
143
144         /* Obtain SKU information */
145         mutex_lock(&dev_priv->sb_lock);
146         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
147                 CCK_FUSE_HPLL_FREQ_MASK;
148         mutex_unlock(&dev_priv->sb_lock);
149
150         return vco_freq[hpll_freq] * 1000;
151 }
152
153 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
154                       const char *name, u32 reg, int ref_freq)
155 {
156         u32 val;
157         int divider;
158
159         mutex_lock(&dev_priv->sb_lock);
160         val = vlv_cck_read(dev_priv, reg);
161         mutex_unlock(&dev_priv->sb_lock);
162
163         divider = val & CCK_FREQUENCY_VALUES;
164
165         WARN((val & CCK_FREQUENCY_STATUS) !=
166              (divider << CCK_FREQUENCY_STATUS_SHIFT),
167              "%s change in progress\n", name);
168
169         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
170 }
171
172 static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
173                                   const char *name, u32 reg)
174 {
175         if (dev_priv->hpll_freq == 0)
176                 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
177
178         return vlv_get_cck_clock(dev_priv, name, reg,
179                                  dev_priv->hpll_freq);
180 }
181
182 static int
183 intel_pch_rawclk(struct drm_i915_private *dev_priv)
184 {
185         return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
186 }
187
/* VLV/CHV hraw clock rate in kHz, derived from the display reference CCK. */
static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
        /* RAWCLK_FREQ_VLV register updated from power well code */
        return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
                                      CCK_DISPLAY_REF_CLOCK_CONTROL);
}
195
196 static int
197 intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
198 {
199         uint32_t clkcfg;
200
201         /* hrawclock is 1/4 the FSB frequency */
202         clkcfg = I915_READ(CLKCFG);
203         switch (clkcfg & CLKCFG_FSB_MASK) {
204         case CLKCFG_FSB_400:
205                 return 100000;
206         case CLKCFG_FSB_533:
207                 return 133333;
208         case CLKCFG_FSB_667:
209                 return 166667;
210         case CLKCFG_FSB_800:
211                 return 200000;
212         case CLKCFG_FSB_1067:
213                 return 266667;
214         case CLKCFG_FSB_1333:
215                 return 333333;
216         /* these two are just a guess; one of them might be right */
217         case CLKCFG_FSB_1600:
218         case CLKCFG_FSB_1600_ALT:
219                 return 400000;
220         default:
221                 return 133333;
222         }
223 }
224
225 void intel_update_rawclk(struct drm_i915_private *dev_priv)
226 {
227         if (HAS_PCH_SPLIT(dev_priv))
228                 dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
229         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
230                 dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
231         else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
232                 dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
233         else
234                 return; /* no rawclk on other platforms, or no need to know it */
235
236         DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
237 }
238
239 static void intel_update_czclk(struct drm_i915_private *dev_priv)
240 {
241         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
242                 return;
243
244         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
245                                                       CCK_CZ_CLOCK_CONTROL);
246
247         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
248 }
249
250 static inline u32 /* units of 100MHz */
251 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
252                     const struct intel_crtc_state *pipe_config)
253 {
254         if (HAS_DDI(dev_priv))
255                 return pipe_config->port_clock; /* SPLL */
256         else if (IS_GEN5(dev_priv))
257                 return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
258         else
259                 return 270000;
260 }
261
/*
 * DPLL divider limit tables, one per platform/output-type combination.
 * dot/vco are in kHz; the remaining fields are divider value ranges.
 */

/* gen2, DAC output */
static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

/* gen2, DVO output; differs from the DAC limits only in p2_fast */
static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

/* gen2, LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

/* gen3/4 (i9xx), SDVO output */
static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* gen3/4 (i9xx), LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};
326
327
/* G4x, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

/* G4x, HDMI/DP output */
static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* G4x, single-channel LVDS */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

/* G4x, dual-channel LVDS */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

/* Pineview, SDVO output */
static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Pineview, LVDS output */
static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};
411
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake, single-channel LVDS (120MHz refclk) */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake, dual-channel LVDS (120MHz refclk) */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};
482
static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
          * clock and actual rate limits are more relaxed, so checking
          * them would make no difference.
          */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* CHV m2 is a fixed-point value, hence the << 22 scaling */
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};
526
/* Whether @state requires a full modeset; thin wrapper around the atomic helper. */
static bool
needs_modeset(struct drm_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(state);
}
532
533 /*
534  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
535  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
536  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
537  * The helpers' return value is the rate of the clock that is fed to the
538  * display engine's pipe which can be the above fast dot clock rate or a
539  * divided-down version of it.
540  */
541 /* m1 is reserved as 0 in Pineview, n is a ring counter */
542 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
543 {
544         clock->m = clock->m2 + 2;
545         clock->p = clock->p1 * clock->p2;
546         if (WARN_ON(clock->n == 0 || clock->p == 0))
547                 return 0;
548         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
549         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
550
551         return clock->dot;
552 }
553
554 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
555 {
556         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
557 }
558
559 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
560 {
561         clock->m = i9xx_dpll_compute_m(clock);
562         clock->p = clock->p1 * clock->p2;
563         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
564                 return 0;
565         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
566         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
567
568         return clock->dot;
569 }
570
571 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
572 {
573         clock->m = clock->m1 * clock->m2;
574         clock->p = clock->p1 * clock->p2;
575         if (WARN_ON(clock->n == 0 || clock->p == 0))
576                 return 0;
577         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
578         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
579
580         return clock->dot / 5;
581 }
582
583 int chv_calc_dpll_params(int refclk, struct dpll *clock)
584 {
585         clock->m = clock->m1 * clock->m2;
586         clock->p = clock->p1 * clock->p2;
587         if (WARN_ON(clock->n == 0 || clock->p == 0))
588                 return 0;
589         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
590                         clock->n << 22);
591         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
592
593         return clock->dot / 5;
594 }
595
/* Bail out of the enclosing validity check with false; debug print disabled. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        /* m1 > m2 ordering is not enforced on PNV/VLV/CHV/BXT */
        if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
            !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        /* VLV/CHV/BXT limit tables carry no separate m/p ranges */
        if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
637
638 static int
639 i9xx_select_p2_div(const struct intel_limit *limit,
640                    const struct intel_crtc_state *crtc_state,
641                    int target)
642 {
643         struct drm_device *dev = crtc_state->base.crtc->dev;
644
645         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
646                 /*
647                  * For LVDS just rely on its current settings for dual-channel.
648                  * We haven't figured out how to reliably set up different
649                  * single/dual channel state, if we even can.
650                  */
651                 if (intel_is_dual_link_lvds(dev))
652                         return limit->p2.p2_fast;
653                 else
654                         return limit->p2.p2_slow;
655         } else {
656                 if (target < limit->p2.dot_limit)
657                         return limit->p2.p2_slow;
658                 else
659                         return limit->p2.p2_fast;
660         }
661 }
662
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* best error so far; start at worst acceptable */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Exhaustive search over the divider space, keeping the candidate
         * with the smallest deviation from the target dot clock. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* require m1 > m2, matching intel_PLL_is_valid() */
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        /* strict '<': first of equally good
                                         * candidates in scan order wins */
                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
719
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* best error so far; start at worst acceptable */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Same exhaustive scan as i9xx_find_best_dpll(), but without the
         * m1 > m2 restriction (Pineview has one combined m divider) and
         * using the Pineview clock equation. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
774
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * NOTE(review): @match_clock is accepted for signature symmetry with the
 * other find_best_dpll() variants but is never consulted here.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                /* New best candidate; also
                                                 * clamp max_n so the outer
                                                 * loop never revisits a
                                                 * larger (less precise) n. */
                                                *best_clock = clock;
                                                err_most = this_err;
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
832
833 /*
834  * Check if the calculated PLL configuration is more optimal compared to the
835  * best configuration and error found so far. Return the calculated error.
836  */
837 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
838                                const struct dpll *calculated_clock,
839                                const struct dpll *best_clock,
840                                unsigned int best_error_ppm,
841                                unsigned int *error_ppm)
842 {
843         /*
844          * For CHV ignore the error and consider only the P value.
845          * Prefer a bigger P value based on HW requirements.
846          */
847         if (IS_CHERRYVIEW(dev)) {
848                 *error_ppm = 0;
849
850                 return calculated_clock->p > best_clock->p;
851         }
852
853         if (WARN_ON_ONCE(!target_freq))
854                 return false;
855
856         *error_ppm = div_u64(1000000ULL *
857                                 abs(target_freq - calculated_clock->dot),
858                              target_freq);
859         /*
860          * Prefer a better P value over a better (smaller) error if the error
861          * is small. Ensure this preference for future configurations too by
862          * setting the error to 0.
863          */
864         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
865                 *error_ppm = 0;
866
867                 return true;
868         }
869
870         return *error_ppm + 10 < best_error_ppm;
871 }
872
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * VLV walks n/p1/p2 exhaustively, derives m2 arithmetically from the
 * target, and lets vlv_PLL_is_optimal() arbitrate candidates by ppm error.
 * NOTE(review): @match_clock is accepted for signature symmetry but is
 * never consulted here.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
        bool found = false;

        target *= 5; /* fast clock */

        memset(best_clock, 0, sizeof(*best_clock));

        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                        for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
                             clock.p2 -= clock.p2 > 10 ? 2 : 1) {
                                clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
                                for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                                        unsigned int ppm;

                                        /* Solve for the m2 that puts dot closest to target. */
                                        clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
                                                                     refclk * clock.m1);

                                        vlv_calc_dpll_params(refclk, &clock);

                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;

                                        if (!vlv_PLL_is_optimal(dev, target,
                                                                &clock,
                                                                best_clock,
                                                                bestppm, &ppm))
                                                continue;

                                        *best_clock = clock;
                                        bestppm = ppm;
                                        found = true;
                                }
                        }
                }
        }

        return found;
}
931
932 /*
933  * Returns a set of divisors for the desired target clock with the given
934  * refclk, or FALSE.  The returned values represent the clock equation:
935  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
936  */
937 static bool
938 chv_find_best_dpll(const struct intel_limit *limit,
939                    struct intel_crtc_state *crtc_state,
940                    int target, int refclk, struct dpll *match_clock,
941                    struct dpll *best_clock)
942 {
943         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
944         struct drm_device *dev = crtc->base.dev;
945         unsigned int best_error_ppm;
946         struct dpll clock;
947         uint64_t m2;
948         int found = false;
949
950         memset(best_clock, 0, sizeof(*best_clock));
951         best_error_ppm = 1000000;
952
953         /*
954          * Based on hardware doc, the n always set to 1, and m1 always
955          * set to 2.  If requires to support 200Mhz refclk, we need to
956          * revisit this because n may not 1 anymore.
957          */
958         clock.n = 1, clock.m1 = 2;
959         target *= 5;    /* fast clock */
960
961         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
962                 for (clock.p2 = limit->p2.p2_fast;
963                                 clock.p2 >= limit->p2.p2_slow;
964                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
965                         unsigned int error_ppm;
966
967                         clock.p = clock.p1 * clock.p2;
968
969                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
970                                         clock.n) << 22, refclk * clock.m1);
971
972                         if (m2 > INT_MAX/clock.m1)
973                                 continue;
974
975                         clock.m2 = m2;
976
977                         chv_calc_dpll_params(refclk, &clock);
978
979                         if (!intel_PLL_is_valid(dev, limit, &clock))
980                                 continue;
981
982                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
983                                                 best_error_ppm, &error_ppm))
984                                 continue;
985
986                         *best_clock = clock;
987                         best_error_ppm = error_ppm;
988                         found = true;
989                 }
990         }
991
992         return found;
993 }
994
995 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
996                         struct dpll *best_clock)
997 {
998         int refclk = 100000;
999         const struct intel_limit *limit = &intel_limits_bxt;
1000
1001         return chv_find_best_dpll(limit, crtc_state,
1002                                   target_clock, refclk, NULL, best_clock);
1003 }
1004
1005 bool intel_crtc_active(struct drm_crtc *crtc)
1006 {
1007         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1008
1009         /* Be paranoid as we can arrive here with only partial
1010          * state retrieved from the hardware during setup.
1011          *
1012          * We can ditch the adjusted_mode.crtc_clock check as soon
1013          * as Haswell has gained clock readout/fastboot support.
1014          *
1015          * We can ditch the crtc->primary->fb check as soon as we can
1016          * properly reconstruct framebuffers.
1017          *
1018          * FIXME: The intel_crtc->active here should be switched to
1019          * crtc->state->active once we have proper CRTC states wired up
1020          * for atomic.
1021          */
1022         return intel_crtc->active && crtc->primary->state->fb &&
1023                 intel_crtc->config->base.adjusted_mode.crtc_clock;
1024 }
1025
1026 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1027                                              enum pipe pipe)
1028 {
1029         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1030         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1031
1032         return intel_crtc->config->cpu_transcoder;
1033 }
1034
1035 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1036 {
1037         struct drm_i915_private *dev_priv = to_i915(dev);
1038         i915_reg_t reg = PIPEDSL(pipe);
1039         u32 line1, line2;
1040         u32 line_mask;
1041
1042         if (IS_GEN2(dev))
1043                 line_mask = DSL_LINEMASK_GEN2;
1044         else
1045                 line_mask = DSL_LINEMASK_GEN3;
1046
1047         line1 = I915_READ(reg) & line_mask;
1048         msleep(5);
1049         line2 = I915_READ(reg) & line_mask;
1050
1051         return line1 == line2;
1052 }
1053
1054 /*
1055  * intel_wait_for_pipe_off - wait for pipe to turn off
1056  * @crtc: crtc whose pipe to wait for
1057  *
1058  * After disabling a pipe, we can't wait for vblank in the usual way,
1059  * spinning on the vblank interrupt status bit, since we won't actually
1060  * see an interrupt when the pipe is disabled.
1061  *
1062  * On Gen4 and above:
1063  *   wait for the pipe register state bit to turn off
1064  *
1065  * Otherwise:
1066  *   wait for the display line value to settle (it usually
1067  *   ends up stopping at the start of the next frame).
1068  *
1069  */
1070 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1071 {
1072         struct drm_device *dev = crtc->base.dev;
1073         struct drm_i915_private *dev_priv = to_i915(dev);
1074         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1075         enum pipe pipe = crtc->pipe;
1076
1077         if (INTEL_INFO(dev)->gen >= 4) {
1078                 i915_reg_t reg = PIPECONF(cpu_transcoder);
1079
1080                 /* Wait for the Pipe State to go off */
1081                 if (intel_wait_for_register(dev_priv,
1082                                             reg, I965_PIPECONF_ACTIVE, 0,
1083                                             100))
1084                         WARN(1, "pipe_off wait timed out\n");
1085         } else {
1086                 /* Wait for the display line to settle */
1087                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1088                         WARN(1, "pipe_off wait timed out\n");
1089         }
1090 }
1091
1092 /* Only for pre-ILK configs */
1093 void assert_pll(struct drm_i915_private *dev_priv,
1094                 enum pipe pipe, bool state)
1095 {
1096         u32 val;
1097         bool cur_state;
1098
1099         val = I915_READ(DPLL(pipe));
1100         cur_state = !!(val & DPLL_VCO_ENABLE);
1101         I915_STATE_WARN(cur_state != state,
1102              "PLL state assertion failure (expected %s, current %s)\n",
1103                         onoff(state), onoff(cur_state));
1104 }
1105
1106 /* XXX: the dsi pll is shared between MIPI DSI ports */
1107 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1108 {
1109         u32 val;
1110         bool cur_state;
1111
1112         mutex_lock(&dev_priv->sb_lock);
1113         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1114         mutex_unlock(&dev_priv->sb_lock);
1115
1116         cur_state = val & DSI_PLL_VCO_EN;
1117         I915_STATE_WARN(cur_state != state,
1118              "DSI PLL state assertion failure (expected %s, current %s)\n",
1119                         onoff(state), onoff(cur_state));
1120 }
1121
1122 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1123                           enum pipe pipe, bool state)
1124 {
1125         bool cur_state;
1126         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1127                                                                       pipe);
1128
1129         if (HAS_DDI(dev_priv)) {
1130                 /* DDI does not have a specific FDI_TX register */
1131                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1132                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1133         } else {
1134                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1135                 cur_state = !!(val & FDI_TX_ENABLE);
1136         }
1137         I915_STATE_WARN(cur_state != state,
1138              "FDI TX state assertion failure (expected %s, current %s)\n",
1139                         onoff(state), onoff(cur_state));
1140 }
1141 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1142 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1143
1144 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1145                           enum pipe pipe, bool state)
1146 {
1147         u32 val;
1148         bool cur_state;
1149
1150         val = I915_READ(FDI_RX_CTL(pipe));
1151         cur_state = !!(val & FDI_RX_ENABLE);
1152         I915_STATE_WARN(cur_state != state,
1153              "FDI RX state assertion failure (expected %s, current %s)\n",
1154                         onoff(state), onoff(cur_state));
1155 }
1156 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1157 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1158
1159 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1160                                       enum pipe pipe)
1161 {
1162         u32 val;
1163
1164         /* ILK FDI PLL is always enabled */
1165         if (IS_GEN5(dev_priv))
1166                 return;
1167
1168         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1169         if (HAS_DDI(dev_priv))
1170                 return;
1171
1172         val = I915_READ(FDI_TX_CTL(pipe));
1173         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1174 }
1175
1176 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1177                        enum pipe pipe, bool state)
1178 {
1179         u32 val;
1180         bool cur_state;
1181
1182         val = I915_READ(FDI_RX_CTL(pipe));
1183         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1184         I915_STATE_WARN(cur_state != state,
1185              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1186                         onoff(state), onoff(cur_state));
1187 }
1188
/*
 * Warn if the panel power sequencer registers that cover @pipe are
 * write-locked while the panel is powered on. Pre-DDI platforms only
 * (guarded by the WARN_ON below).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
                           enum pipe pipe)
{
        struct drm_device *dev = &dev_priv->drm;
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = PIPE_A;
        bool locked = true;

        if (WARN_ON(HAS_DDI(dev)))
                return;

        /* Pick the power-sequencer control register and work out which
         * pipe actually drives the panel on this platform. */
        if (HAS_PCH_SPLIT(dev)) {
                u32 port_sel;

                pp_reg = PCH_PP_CONTROL;
                port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

                if (port_sel == PANEL_PORT_SELECT_LVDS &&
                    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
                        panel_pipe = PIPE_B;
                /* XXX: else fix for eDP */
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = VLV_PIPE_PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
                pp_reg = PP_CONTROL;
                if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
                        panel_pipe = PIPE_B;
        }

        /* Registers count as unlocked when the panel is powered off or
         * the unlock magic value is present. */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
                locked = false;

        I915_STATE_WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
1230
1231 static void assert_cursor(struct drm_i915_private *dev_priv,
1232                           enum pipe pipe, bool state)
1233 {
1234         struct drm_device *dev = &dev_priv->drm;
1235         bool cur_state;
1236
1237         if (IS_845G(dev) || IS_I865G(dev))
1238                 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1239         else
1240                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1241
1242         I915_STATE_WARN(cur_state != state,
1243              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1244                         pipe_name(pipe), onoff(state), onoff(cur_state));
1245 }
1246 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1247 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1248
/*
 * Warn if pipe @pipe is not in the expected enabled/disabled @state.
 * Reads PIPECONF under a transcoder power-domain reference; if the
 * domain cannot be acquired the pipe is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        bool cur_state;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum intel_display_power_domain power_domain;

        /* if we need the pipe quirk it must be always on */
        if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
            (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
                state = true;

        power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
        if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
                u32 val = I915_READ(PIPECONF(cpu_transcoder));
                cur_state = !!(val & PIPECONF_ENABLE);

                intel_display_power_put(dev_priv, power_domain);
        } else {
                /* Power domain off: the pipe cannot be running. */
                cur_state = false;
        }

        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
                        pipe_name(pipe), onoff(state), onoff(cur_state));
}
1276
1277 static void assert_plane(struct drm_i915_private *dev_priv,
1278                          enum plane plane, bool state)
1279 {
1280         u32 val;
1281         bool cur_state;
1282
1283         val = I915_READ(DSPCNTR(plane));
1284         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1285         I915_STATE_WARN(cur_state != state,
1286              "plane %c assertion failure (expected %s, current %s)\n",
1287                         plane_name(plane), onoff(state), onoff(cur_state));
1288 }
1289
1290 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1291 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1292
1293 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1294                                    enum pipe pipe)
1295 {
1296         struct drm_device *dev = &dev_priv->drm;
1297         int i;
1298
1299         /* Primary planes are fixed to pipes on gen4+ */
1300         if (INTEL_INFO(dev)->gen >= 4) {
1301                 u32 val = I915_READ(DSPCNTR(pipe));
1302                 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1303                      "plane %c assertion failure, should be disabled but not\n",
1304                      plane_name(pipe));
1305                 return;
1306         }
1307
1308         /* Need to check both planes against the pipe */
1309         for_each_pipe(dev_priv, i) {
1310                 u32 val = I915_READ(DSPCNTR(i));
1311                 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1312                         DISPPLANE_SEL_PIPE_SHIFT;
1313                 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1314                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1315                      plane_name(i), pipe_name(pipe));
1316         }
1317 }
1318
/*
 * Warn if any sprite/overlay plane on @pipe is still enabled. The
 * sprite register layout differs per generation, hence the cascade.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
{
        struct drm_device *dev = &dev_priv->drm;
        int sprite;

        if (INTEL_INFO(dev)->gen >= 9) {
                /* gen9+: per-pipe/per-plane PLANE_CTL registers */
                for_each_sprite(dev_priv, pipe, sprite) {
                        u32 val = I915_READ(PLANE_CTL(pipe, sprite));
                        I915_STATE_WARN(val & PLANE_CTL_ENABLE,
                             "plane %d assertion failure, should be off on pipe %c but is still active\n",
                             sprite, pipe_name(pipe));
                }
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                /* vlv/chv: multiple sprites per pipe via SPCNTR */
                for_each_sprite(dev_priv, pipe, sprite) {
                        u32 val = I915_READ(SPCNTR(pipe, sprite));
                        I915_STATE_WARN(val & SP_ENABLE,
                             "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                             sprite_name(pipe, sprite), pipe_name(pipe));
                }
        } else if (INTEL_INFO(dev)->gen >= 7) {
                /* gen7/7.5: one sprite per pipe via SPRCTL */
                u32 val = I915_READ(SPRCTL(pipe));
                I915_STATE_WARN(val & SPRITE_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(pipe), pipe_name(pipe));
        } else if (INTEL_INFO(dev)->gen >= 5) {
                /* gen5/6: one sprite per pipe via DVSCNTR */
                u32 val = I915_READ(DVSCNTR(pipe));
                I915_STATE_WARN(val & DVS_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(pipe), pipe_name(pipe));
        }
}
1351
/*
 * Warn if vblank interrupts appear to still be enabled on @crtc: a
 * successful drm_crtc_vblank_get() (return 0) here presumably means the
 * crtc was not fully disabled — TODO confirm against drm_vblank rules.
 * The reference just taken is dropped again immediately.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
        if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
                drm_crtc_vblank_put(crtc);
}
1357
1358 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1359                                     enum pipe pipe)
1360 {
1361         u32 val;
1362         bool enabled;
1363
1364         val = I915_READ(PCH_TRANSCONF(pipe));
1365         enabled = !!(val & TRANS_ENABLE);
1366         I915_STATE_WARN(enabled,
1367              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1368              pipe_name(pipe));
1369 }
1370
1371 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1372                             enum pipe pipe, u32 port_sel, u32 val)
1373 {
1374         if ((val & DP_PORT_EN) == 0)
1375                 return false;
1376
1377         if (HAS_PCH_CPT(dev_priv)) {
1378                 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1379                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1380                         return false;
1381         } else if (IS_CHERRYVIEW(dev_priv)) {
1382                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1383                         return false;
1384         } else {
1385                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1386                         return false;
1387         }
1388         return true;
1389 }
1390
1391 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1392                               enum pipe pipe, u32 val)
1393 {
1394         if ((val & SDVO_ENABLE) == 0)
1395                 return false;
1396
1397         if (HAS_PCH_CPT(dev_priv)) {
1398                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1399                         return false;
1400         } else if (IS_CHERRYVIEW(dev_priv)) {
1401                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1402                         return false;
1403         } else {
1404                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1405                         return false;
1406         }
1407         return true;
1408 }
1409
1410 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1411                               enum pipe pipe, u32 val)
1412 {
1413         if ((val & LVDS_PORT_EN) == 0)
1414                 return false;
1415
1416         if (HAS_PCH_CPT(dev_priv)) {
1417                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1418                         return false;
1419         } else {
1420                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1421                         return false;
1422         }
1423         return true;
1424 }
1425
1426 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1427                               enum pipe pipe, u32 val)
1428 {
1429         if ((val & ADPA_DAC_ENABLE) == 0)
1430                 return false;
1431         if (HAS_PCH_CPT(dev_priv)) {
1432                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1433                         return false;
1434         } else {
1435                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1436                         return false;
1437         }
1438         return true;
1439 }
1440
/*
 * Warn if the PCH DP port at @reg is enabled and routed to transcoder
 * @pipe, and additionally if a *disabled* IBX DP port is still left
 * selecting transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, i915_reg_t reg,
                                   u32 port_sel)
{
        u32 val = I915_READ(reg);
        I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             i915_mmio_reg_offset(reg), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
             && (val & DP_PIPEB_SELECT),
             "IBX PCH dp port still using transcoder B\n");
}
1454
/*
 * Warn if the PCH HDMI/SDVO port at @reg is enabled on transcoder @pipe,
 * and additionally if a *disabled* IBX port is still left selecting
 * transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, i915_reg_t reg)
{
        u32 val = I915_READ(reg);
        I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             i915_mmio_reg_offset(reg), pipe_name(pipe));

        I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
             && (val & SDVO_PIPE_B_SELECT),
             "IBX PCH hdmi port still using transcoder B\n");
}
1467
/*
 * Check that every PCH output port — DP B/C/D, VGA (ADPA), LVDS and
 * HDMI B/C/D — is disabled on transcoder @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 val;

        /* DP ports */
        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
        assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

        /* analog VGA */
        val = I915_READ(PCH_ADPA);
        I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
             "PCH VGA enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));

        /* LVDS */
        val = I915_READ(PCH_LVDS);
        I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
             "PCH LVDS enabled on transcoder %c, should be disabled\n",
             pipe_name(pipe));

        /* HDMI/SDVO ports */
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1491
/*
 * Program the VLV DPLL for @crtc's pipe and wait for it to lock.
 * Callers (vlv_enable_pll()) have already verified the pipe is off and
 * the panel power sequencer registers are unlocked.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
        POSTING_READ(DPLL(pipe));
        /* fixed delay before polling the lock bit */
        udelay(150);

        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe),
                                    DPLL_LOCK_VLV,
                                    DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1509
/*
 * Enable the VLV DPLL for @crtc according to @pipe_config, then program
 * the DPLL_MD (media/pixel divider) register unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        assert_pipe_disabled(dev_priv, pipe);

        /* PLL is protected by panel, make sure we can write it */
        assert_panel_unlocked(dev_priv, pipe);

        /* Only program/lock the PLL when the state actually enables it. */
        if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
                _vlv_enable_pll(crtc, pipe_config);

        I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
        POSTING_READ(DPLL_MD(pipe));
}
1527
1528
/*
 * Program the CHV DPLL for @crtc's pipe: first enable the 10-bit dclkp
 * clock to the display controller over the sideband bus, then turn on
 * the PLL and poll for lock. Callers (chv_enable_pll()) have verified
 * the pipe is off and panel registers are unlocked.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Enable back the 10bit clock to display controller */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
        tmp |= DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

        mutex_unlock(&dev_priv->sb_lock);

        /*
         * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
         */
        udelay(1);

        /* Enable PLL */
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

        /* Check PLL is locked */
        if (intel_wait_for_register(dev_priv,
                                    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
                                    1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1560
/*
 * Enable the pipe DPLL on CHV and program the pixel multiplier, working
 * around the missing per-pipe DPLL_MD register for pipes B/C.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin up the VCO when the state actually asks for it */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* remember what we wrote so the readout/verify code can compare */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		/* Pipe A has a working DPLL_MD register */
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1597
1598 static int intel_num_dvo_pipes(struct drm_device *dev)
1599 {
1600         struct intel_crtc *crtc;
1601         int count = 0;
1602
1603         for_each_intel_crtc(dev, crtc) {
1604                 count += crtc->base.state->active &&
1605                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1606         }
1607
1608         return count;
1609 }
1610
/*
 * Enable a pre-ILK pipe PLL, including the DVO 2x clock and pixel
 * multiplier handling. Register write order here is significant.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ has a dedicated pixel multiplier register */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1673
1674 /**
1675  * i9xx_disable_pll - disable a PLL
1676  * @dev_priv: i915 private structure
1677  * @pipe: pipe PLL to disable
1678  *
1679  * Disable the PLL for @pipe, making sure the pipe is off first.
1680  *
1681  * Note!  This is for pre-ILK only.
1682  */
1683 static void i9xx_disable_pll(struct intel_crtc *crtc)
1684 {
1685         struct drm_device *dev = crtc->base.dev;
1686         struct drm_i915_private *dev_priv = to_i915(dev);
1687         enum pipe pipe = crtc->pipe;
1688
1689         /* Disable DVO 2x clock on both PLLs if necessary */
1690         if (IS_I830(dev) &&
1691             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
1692             !intel_num_dvo_pipes(dev)) {
1693                 I915_WRITE(DPLL(PIPE_B),
1694                            I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1695                 I915_WRITE(DPLL(PIPE_A),
1696                            I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1697         }
1698
1699         /* Don't disable pipe or pipe PLLs if needed */
1700         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1701             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1702                 return;
1703
1704         /* Make sure the pipe isn't still relying on us */
1705         assert_pipe_disabled(dev_priv, pipe);
1706
1707         I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1708         POSTING_READ(DPLL(pipe));
1709 }
1710
/*
 * Disable the VLV pipe PLL, leaving the reference clock (and CRI clock
 * for pipes B/C) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes B/C additionally keep the CRI clock enabled */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1726
/*
 * Disable the CHV pipe PLL and turn off the 10bit display clock via the
 * DPIO sideband (the inverse of _chv_enable_pll()).
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes B/C additionally keep the CRI clock enabled */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1752
/*
 * Wait (up to 1 s) for the PHY to report the given port as ready,
 * warning if the expected ready bits never appear.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Each port reports readiness in a different register/bitfield */
	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1784
1785 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1786                                            enum pipe pipe)
1787 {
1788         struct drm_device *dev = &dev_priv->drm;
1789         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1790         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1791         i915_reg_t reg;
1792         uint32_t val, pipeconf_val;
1793
1794         /* Make sure PCH DPLL is enabled */
1795         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1796
1797         /* FDI must be feeding us bits for PCH ports */
1798         assert_fdi_tx_enabled(dev_priv, pipe);
1799         assert_fdi_rx_enabled(dev_priv, pipe);
1800
1801         if (HAS_PCH_CPT(dev)) {
1802                 /* Workaround: Set the timing override bit before enabling the
1803                  * pch transcoder. */
1804                 reg = TRANS_CHICKEN2(pipe);
1805                 val = I915_READ(reg);
1806                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1807                 I915_WRITE(reg, val);
1808         }
1809
1810         reg = PCH_TRANSCONF(pipe);
1811         val = I915_READ(reg);
1812         pipeconf_val = I915_READ(PIPECONF(pipe));
1813
1814         if (HAS_PCH_IBX(dev_priv)) {
1815                 /*
1816                  * Make the BPC in transcoder be consistent with
1817                  * that in pipeconf reg. For HDMI we must use 8bpc
1818                  * here for both 8bpc and 12bpc.
1819                  */
1820                 val &= ~PIPECONF_BPC_MASK;
1821                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1822                         val |= PIPECONF_8BPC;
1823                 else
1824                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1825         }
1826
1827         val &= ~TRANS_INTERLACE_MASK;
1828         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1829                 if (HAS_PCH_IBX(dev_priv) &&
1830                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1831                         val |= TRANS_LEGACY_INTERLACED_ILK;
1832                 else
1833                         val |= TRANS_INTERLACED;
1834         else
1835                 val |= TRANS_PROGRESSIVE;
1836
1837         I915_WRITE(reg, val | TRANS_ENABLE);
1838         if (intel_wait_for_register(dev_priv,
1839                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1840                                     100))
1841                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1842 }
1843
/*
 * Enable the single LPT PCH transcoder, propagating the interlace mode
 * from the CPU transcoder's PIPECONF. FDI must already be running.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Mirror the CPU transcoder's interlace setting */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	/* Wait up to 100 ms for the transcoder to report enabled */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1875
/*
 * Disable the PCH transcoder for @pipe and wait for it to shut down.
 * FDI and the PCH ports must already be off.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1908
/*
 * Disable the single LPT PCH transcoder, wait for it to stop, and undo
 * the timing-override workaround set at enable time.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1927
1928 /**
1929  * intel_enable_pipe - enable a pipe, asserting requirements
1930  * @crtc: crtc responsible for the pipe
1931  *
1932  * Enable @crtc's pipe, making sure that various hardware specific requirements
1933  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1934  */
1935 static void intel_enable_pipe(struct intel_crtc *crtc)
1936 {
1937         struct drm_device *dev = crtc->base.dev;
1938         struct drm_i915_private *dev_priv = to_i915(dev);
1939         enum pipe pipe = crtc->pipe;
1940         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1941         enum pipe pch_transcoder;
1942         i915_reg_t reg;
1943         u32 val;
1944
1945         DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1946
1947         assert_planes_disabled(dev_priv, pipe);
1948         assert_cursor_disabled(dev_priv, pipe);
1949         assert_sprites_disabled(dev_priv, pipe);
1950
1951         if (HAS_PCH_LPT(dev_priv))
1952                 pch_transcoder = TRANSCODER_A;
1953         else
1954                 pch_transcoder = pipe;
1955
1956         /*
1957          * A pipe without a PLL won't actually be able to drive bits from
1958          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1959          * need the check.
1960          */
1961         if (HAS_GMCH_DISPLAY(dev_priv))
1962                 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
1963                         assert_dsi_pll_enabled(dev_priv);
1964                 else
1965                         assert_pll_enabled(dev_priv, pipe);
1966         else {
1967                 if (crtc->config->has_pch_encoder) {
1968                         /* if driving the PCH, we need FDI enabled */
1969                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1970                         assert_fdi_tx_pll_enabled(dev_priv,
1971                                                   (enum pipe) cpu_transcoder);
1972                 }
1973                 /* FIXME: assert CPU port conditions for SNB+ */
1974         }
1975
1976         reg = PIPECONF(cpu_transcoder);
1977         val = I915_READ(reg);
1978         if (val & PIPECONF_ENABLE) {
1979                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1980                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
1981                 return;
1982         }
1983
1984         I915_WRITE(reg, val | PIPECONF_ENABLE);
1985         POSTING_READ(reg);
1986
1987         /*
1988          * Until the pipe starts DSL will read as 0, which would cause
1989          * an apparent vblank timestamp jump, which messes up also the
1990          * frame count when it's derived from the timestamps. So let's
1991          * wait for the pipe to start properly before we call
1992          * drm_crtc_vblank_on()
1993          */
1994         if (dev->max_vblank_count == 0 &&
1995             wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
1996                 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
1997 }
1998
1999 /**
2000  * intel_disable_pipe - disable a pipe, asserting requirements
2001  * @crtc: crtc whose pipes is to be disabled
2002  *
2003  * Disable the pipe of @crtc, making sure that various hardware
2004  * specific requirements are met, if applicable, e.g. plane
2005  * disabled, panel fitter off, etc.
2006  *
2007  * Will wait until the pipe has shut down before returning.
2008  */
2009 static void intel_disable_pipe(struct intel_crtc *crtc)
2010 {
2011         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2012         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2013         enum pipe pipe = crtc->pipe;
2014         i915_reg_t reg;
2015         u32 val;
2016
2017         DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2018
2019         /*
2020          * Make sure planes won't keep trying to pump pixels to us,
2021          * or we might hang the display.
2022          */
2023         assert_planes_disabled(dev_priv, pipe);
2024         assert_cursor_disabled(dev_priv, pipe);
2025         assert_sprites_disabled(dev_priv, pipe);
2026
2027         reg = PIPECONF(cpu_transcoder);
2028         val = I915_READ(reg);
2029         if ((val & PIPECONF_ENABLE) == 0)
2030                 return;
2031
2032         /*
2033          * Double wide has implications for planes
2034          * so best keep it disabled when not needed.
2035          */
2036         if (crtc->config->double_wide)
2037                 val &= ~PIPECONF_DOUBLE_WIDE;
2038
2039         /* Don't disable pipe or pipe PLLs if needed */
2040         if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2041             !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2042                 val &= ~PIPECONF_ENABLE;
2043
2044         I915_WRITE(reg, val);
2045         if ((val & PIPECONF_ENABLE) == 0)
2046                 intel_wait_for_pipe_off(crtc);
2047 }
2048
2049 static bool need_vtd_wa(struct drm_device *dev)
2050 {
2051 #ifdef CONFIG_INTEL_IOMMU
2052         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2053                 return true;
2054 #endif
2055         return false;
2056 }
2057
/* GTT tile size in bytes: 2k on gen2, 4k on everything later. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
2062
2063 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2064                                            uint64_t fb_modifier, unsigned int cpp)
2065 {
2066         switch (fb_modifier) {
2067         case DRM_FORMAT_MOD_NONE:
2068                 return cpp;
2069         case I915_FORMAT_MOD_X_TILED:
2070                 if (IS_GEN2(dev_priv))
2071                         return 128;
2072                 else
2073                         return 512;
2074         case I915_FORMAT_MOD_Y_TILED:
2075                 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2076                         return 128;
2077                 else
2078                         return 512;
2079         case I915_FORMAT_MOD_Yf_TILED:
2080                 switch (cpp) {
2081                 case 1:
2082                         return 64;
2083                 case 2:
2084                 case 4:
2085                         return 128;
2086                 case 8:
2087                 case 16:
2088                         return 256;
2089                 default:
2090                         MISSING_CASE(cpp);
2091                         return cpp;
2092                 }
2093                 break;
2094         default:
2095                 MISSING_CASE(fb_modifier);
2096                 return cpp;
2097         }
2098 }
2099
2100 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2101                                uint64_t fb_modifier, unsigned int cpp)
2102 {
2103         if (fb_modifier == DRM_FORMAT_MOD_NONE)
2104                 return 1;
2105         else
2106                 return intel_tile_size(dev_priv) /
2107                         intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2108 }
2109
2110 /* Return the tile dimensions in pixel units */
2111 static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2112                             unsigned int *tile_width,
2113                             unsigned int *tile_height,
2114                             uint64_t fb_modifier,
2115                             unsigned int cpp)
2116 {
2117         unsigned int tile_width_bytes =
2118                 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2119
2120         *tile_width = tile_width_bytes / cpp;
2121         *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2122 }
2123
2124 unsigned int
2125 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2126                       uint32_t pixel_format, uint64_t fb_modifier)
2127 {
2128         unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2129         unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2130
2131         return ALIGN(height, tile_height);
2132 }
2133
2134 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2135 {
2136         unsigned int size = 0;
2137         int i;
2138
2139         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2140                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2141
2142         return size;
2143 }
2144
2145 static void
2146 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2147                         const struct drm_framebuffer *fb,
2148                         unsigned int rotation)
2149 {
2150         if (intel_rotation_90_or_270(rotation)) {
2151                 *view = i915_ggtt_view_rotated;
2152                 view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2153         } else {
2154                 *view = i915_ggtt_view_normal;
2155         }
2156 }
2157
2158 static void
2159 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2160                    struct drm_framebuffer *fb)
2161 {
2162         struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2163         unsigned int tile_size, tile_width, tile_height, cpp;
2164
2165         tile_size = intel_tile_size(dev_priv);
2166
2167         cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2168         intel_tile_dims(dev_priv, &tile_width, &tile_height,
2169                         fb->modifier[0], cpp);
2170
2171         info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2172         info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
2173
2174         if (info->pixel_format == DRM_FORMAT_NV12) {
2175                 cpp = drm_format_plane_cpp(fb->pixel_format, 1);
2176                 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2177                                 fb->modifier[1], cpp);
2178
2179                 info->uv_offset = fb->offsets[1];
2180                 info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2181                 info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
2182         }
2183 }
2184
2185 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2186 {
2187         if (INTEL_INFO(dev_priv)->gen >= 9)
2188                 return 256 * 1024;
2189         else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2190                  IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2191                 return 128 * 1024;
2192         else if (INTEL_INFO(dev_priv)->gen >= 4)
2193                 return 4 * 1024;
2194         else
2195                 return 0;
2196 }
2197
2198 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2199                                          uint64_t fb_modifier)
2200 {
2201         switch (fb_modifier) {
2202         case DRM_FORMAT_MOD_NONE:
2203                 return intel_linear_alignment(dev_priv);
2204         case I915_FORMAT_MOD_X_TILED:
2205                 if (INTEL_INFO(dev_priv)->gen >= 9)
2206                         return 256 * 1024;
2207                 return 0;
2208         case I915_FORMAT_MOD_Y_TILED:
2209         case I915_FORMAT_MOD_Yf_TILED:
2210                 return 1 * 1024 * 1024;
2211         default:
2212                 MISSING_CASE(fb_modifier);
2213                 return 0;
2214         }
2215 }
2216
/*
 * Pin a framebuffer's backing object into the GGTT for scanout and, for
 * the normal view, install a fence register. Returns 0 or a negative
 * error code; struct_mutex must be held.
 */
int
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2289
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (normal view
 * only) and unpin the object from the display plane. Caller must hold
 * struct_mutex and pass the same @rotation used when pinning.
 */
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	/* Recompute the view so it matches what was pinned */
	intel_fill_fb_ggtt_view(&view, fb, rotation);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2304
2305 /*
2306  * Adjust the tile offset by moving the difference into
2307  * the x/y offsets.
2308  *
2309  * Input tile dimensions and pitch must already be
2310  * rotated to match x and y, and in pixel units.
2311  */
2312 static u32 intel_adjust_tile_offset(int *x, int *y,
2313                                     unsigned int tile_width,
2314                                     unsigned int tile_height,
2315                                     unsigned int tile_size,
2316                                     unsigned int pitch_tiles,
2317                                     u32 old_offset,
2318                                     u32 new_offset)
2319 {
2320         unsigned int tiles;
2321
2322         WARN_ON(old_offset & (tile_size - 1));
2323         WARN_ON(new_offset & (tile_size - 1));
2324         WARN_ON(new_offset > old_offset);
2325
2326         tiles = (old_offset - new_offset) / tile_size;
2327
2328         *y += tiles / pitch_tiles * tile_height;
2329         *x += tiles % pitch_tiles * tile_width;
2330
2331         return new_offset;
2332 }
2333
/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * Returns the surface-alignment-aligned byte offset; any remainder below
 * the alignment is moved back into *x/*y so no pixels are lost.
 */
u32 intel_compute_tile_offset(int *x, int *y,
			      const struct drm_framebuffer *fb, int plane,
			      unsigned int pitch,
			      unsigned int rotation)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	uint64_t fb_modifier = fb->modifier[plane];
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	u32 offset, offset_aligned, alignment;

	/* Turn the alignment into a mask (assumes power-of-two alignment). */
	alignment = intel_surf_alignment(dev_priv, fb_modifier);
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb_modifier, cpp);

		if (intel_rotation_90_or_270(rotation)) {
			/* pitch is the tile-aligned fb height here (see above) */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Reduce x/y to coordinates within the base tile... */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* ...and compute the byte offset of that tile. */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the sub-alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		/* Linear fb: plain byte offset, remainder split back into x/y. */
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2393
2394 static int i9xx_format_to_fourcc(int format)
2395 {
2396         switch (format) {
2397         case DISPPLANE_8BPP:
2398                 return DRM_FORMAT_C8;
2399         case DISPPLANE_BGRX555:
2400                 return DRM_FORMAT_XRGB1555;
2401         case DISPPLANE_BGRX565:
2402                 return DRM_FORMAT_RGB565;
2403         default:
2404         case DISPPLANE_BGRX888:
2405                 return DRM_FORMAT_XRGB8888;
2406         case DISPPLANE_RGBX888:
2407                 return DRM_FORMAT_XBGR8888;
2408         case DISPPLANE_BGRX101010:
2409                 return DRM_FORMAT_XRGB2101010;
2410         case DISPPLANE_RGBX101010:
2411                 return DRM_FORMAT_XBGR2101010;
2412         }
2413 }
2414
2415 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2416 {
2417         switch (format) {
2418         case PLANE_CTL_FORMAT_RGB_565:
2419                 return DRM_FORMAT_RGB565;
2420         default:
2421         case PLANE_CTL_FORMAT_XRGB_8888:
2422                 if (rgb_order) {
2423                         if (alpha)
2424                                 return DRM_FORMAT_ABGR8888;
2425                         else
2426                                 return DRM_FORMAT_XBGR8888;
2427                 } else {
2428                         if (alpha)
2429                                 return DRM_FORMAT_ARGB8888;
2430                         else
2431                                 return DRM_FORMAT_XRGB8888;
2432                 }
2433         case PLANE_CTL_FORMAT_XRGB_2101010:
2434                 if (rgb_order)
2435                         return DRM_FORMAT_XBGR2101010;
2436                 else
2437                         return DRM_FORMAT_XRGB2101010;
2438         }
2439 }
2440
/*
 * Try to wrap the firmware-preallocated stolen-memory region described by
 * @plane_config in a GEM object and initialize the framebuffer around it,
 * so the image currently being scanned out can be taken over as-is.
 * Returns true on success, false if the region is unusable.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	/* Page-aligned span covering [base, base + size). */
	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* Inherit the tiling already programmed; X-tiled also needs the stride. */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	/* Drop the stolen object again; the caller still owns plane_config->fb. */
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2504
2505 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2506 static void
2507 update_state_fb(struct drm_plane *plane)
2508 {
2509         if (plane->fb == plane->state->fb)
2510                 return;
2511
2512         if (plane->state->fb)
2513                 drm_framebuffer_unreference(plane->state->fb);
2514         plane->state->fb = plane->fb;
2515         if (plane->state->fb)
2516                 drm_framebuffer_reference(plane->state->fb);
2517 }
2518
/*
 * Hook up a framebuffer for the firmware-initialized state of the primary
 * plane: either wrap the preallocated memory in a fresh fb, or share an fb
 * another active CRTC already reconstructed for the same base address.
 * If neither works, disable the plane rather than leave it visible with a
 * NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Allocation failed; the reconstructed fb struct is ours to free. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Shareable only if both planes scan out the same GGTT base. */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary_noatomic(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-screen pass-through: src (16.16 fixed point) == dst == fb size. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	/* Keep the firmware's swizzle setup alive while this fb is tiled. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2614
/*
 * Program the i9xx-style primary plane registers (DSPCNTR and friends)
 * from the given committed crtc/plane state and arm the update.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the fb fourcc into DSPCNTR format bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unsupported formats must be rejected before this point. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from an aligned surface base plus x/y offset. */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, fb, 0,
						  fb->pitches[0], rotation);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Point at the last pixel of the last visible line, since the
		 * hardware scans backwards when rotated 180 degrees. */
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2728
2729 static void i9xx_disable_primary_plane(struct drm_plane *primary,
2730                                        struct drm_crtc *crtc)
2731 {
2732         struct drm_device *dev = crtc->dev;
2733         struct drm_i915_private *dev_priv = to_i915(dev);
2734         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2735         int plane = intel_crtc->plane;
2736
2737         I915_WRITE(DSPCNTR(plane), 0);
2738         if (INTEL_INFO(dev_priv)->gen >= 4)
2739                 I915_WRITE(DSPSURF(plane), 0);
2740         else
2741                 I915_WRITE(DSPADDR(plane), 0);
2742         POSTING_READ(DSPCNTR(plane));
2743 }
2744
/*
 * Program the ILK/SNB/IVB/HSW/BDW primary plane registers from the given
 * committed crtc/plane state and arm the update.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the fb fourcc into DSPCNTR format bits. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Unsupported formats must be rejected before this point. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, fb, 0,
					  fb->pitches[0], rotation);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate around the plane position, no offset fixup. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Point at the last pixel of the last visible line,
			 * since the hardware scans backwards when rotated. */
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2834
2835 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2836                               uint64_t fb_modifier, uint32_t pixel_format)
2837 {
2838         if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2839                 return 64;
2840         } else {
2841                 int cpp = drm_format_plane_cpp(pixel_format, 0);
2842
2843                 return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2844         }
2845 }
2846
/*
 * Return the GGTT offset at which @obj should be scanned out for
 * @intel_plane, using the view implied by the plane's current fb and
 * rotation. For plane 1 (the UV plane of a rotated view) the offset is
 * advanced to the UV start page. Returns -1 (as u32) if no matching GGTT
 * vma exists; the offset is assumed to fit in 32 bits.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state->rotation);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display offsets must fit in 32 bits. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2874
2875 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2876 {
2877         struct drm_device *dev = intel_crtc->base.dev;
2878         struct drm_i915_private *dev_priv = to_i915(dev);
2879
2880         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2881         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2882         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2883 }
2884
2885 /*
2886  * This function detaches (aka. unbinds) unused scalers in hardware
2887  */
2888 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2889 {
2890         struct intel_crtc_scaler_state *scaler_state;
2891         int i;
2892
2893         scaler_state = &intel_crtc->config->scaler_state;
2894
2895         /* loop through and disable scalers that aren't in use */
2896         for (i = 0; i < intel_crtc->num_scalers; i++) {
2897                 if (!scaler_state->scalers[i].in_use)
2898                         skl_detach_scaler(intel_crtc, i);
2899         }
2900 }
2901
2902 u32 skl_plane_ctl_format(uint32_t pixel_format)
2903 {
2904         switch (pixel_format) {
2905         case DRM_FORMAT_C8:
2906                 return PLANE_CTL_FORMAT_INDEXED;
2907         case DRM_FORMAT_RGB565:
2908                 return PLANE_CTL_FORMAT_RGB_565;
2909         case DRM_FORMAT_XBGR8888:
2910                 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
2911         case DRM_FORMAT_XRGB8888:
2912                 return PLANE_CTL_FORMAT_XRGB_8888;
2913         /*
2914          * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
2915          * to be already pre-multiplied. We need to add a knob (or a different
2916          * DRM_FORMAT) for user-space to configure that.
2917          */
2918         case DRM_FORMAT_ABGR8888:
2919                 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
2920                         PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2921         case DRM_FORMAT_ARGB8888:
2922                 return PLANE_CTL_FORMAT_XRGB_8888 |
2923                         PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2924         case DRM_FORMAT_XRGB2101010:
2925                 return PLANE_CTL_FORMAT_XRGB_2101010;
2926         case DRM_FORMAT_XBGR2101010:
2927                 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
2928         case DRM_FORMAT_YUYV:
2929                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
2930         case DRM_FORMAT_YVYU:
2931                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
2932         case DRM_FORMAT_UYVY:
2933                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
2934         case DRM_FORMAT_VYUY:
2935                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
2936         default:
2937                 MISSING_CASE(pixel_format);
2938         }
2939
2940         return 0;
2941 }
2942
2943 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2944 {
2945         switch (fb_modifier) {
2946         case DRM_FORMAT_MOD_NONE:
2947                 break;
2948         case I915_FORMAT_MOD_X_TILED:
2949                 return PLANE_CTL_TILED_X;
2950         case I915_FORMAT_MOD_Y_TILED:
2951                 return PLANE_CTL_TILED_Y;
2952         case I915_FORMAT_MOD_Yf_TILED:
2953                 return PLANE_CTL_TILED_YF;
2954         default:
2955                 MISSING_CASE(fb_modifier);
2956         }
2957
2958         return 0;
2959 }
2960
2961 u32 skl_plane_ctl_rotation(unsigned int rotation)
2962 {
2963         switch (rotation) {
2964         case BIT(DRM_ROTATE_0):
2965                 break;
2966         /*
2967          * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
2968          * while i915 HW rotation is clockwise, thats why this swapping.
2969          */
2970         case BIT(DRM_ROTATE_90):
2971                 return PLANE_CTL_ROTATE_270;
2972         case BIT(DRM_ROTATE_180):
2973                 return PLANE_CTL_ROTATE_180;
2974         case BIT(DRM_ROTATE_270):
2975                 return PLANE_CTL_ROTATE_90;
2976         default:
2977                 MISSING_CASE(rotation);
2978         }
2979
2980         return 0;
2981 }
2982
/*
 * Program the SKL+ universal-plane registers for the primary plane from the
 * given committed crtc/plane state, configure the pipe scaler if one was
 * assigned, and arm the update by writing PLANE_SURF last.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src coordinates are 16.16 fixed point; dst is in pixels */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* Rotated view: offsets are expressed in the rotated frame. */
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* Route the plane through its assigned pipe scaler. */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		/* The scaler window positions the output; plane pos stays 0. */
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3067
3068 static void skylake_disable_primary_plane(struct drm_plane *primary,
3069                                           struct drm_crtc *crtc)
3070 {
3071         struct drm_device *dev = crtc->dev;
3072         struct drm_i915_private *dev_priv = to_i915(dev);
3073         int pipe = to_intel_crtc(crtc)->pipe;
3074
3075         I915_WRITE(PLANE_CTL(pipe, 0), 0);
3076         I915_WRITE(PLANE_SURF(pipe, 0), 0);
3077         POSTING_READ(PLANE_SURF(pipe, 0));
3078 }
3079
/* Assume fb object is pinned & idle & fenced and just update base pointers */
/* Stub: the atomic-context panic/kgdboc path is unsupported and always fails. */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}
3090
3091 static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3092 {
3093         struct intel_crtc *crtc;
3094
3095         for_each_intel_crtc(&dev_priv->drm, crtc)
3096                 intel_finish_page_flip_cs(dev_priv, crtc->pipe);
3097 }
3098
/*
 * Re-commit the primary plane of every CRTC from its current atomic state.
 * Used after a GPU reset (see intel_finish_reset()) to restore the scanout
 * base addresses that CS-based flips lost in the reset would have written.
 */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state;

		/* Hold the per-crtc modeset lock so the plane state can't
		 * change while we re-program the hardware. */
		drm_modeset_lock_crtc(crtc, &plane->base);
		plane_state = to_intel_plane_state(plane->base.state);

		/* Only re-program planes that are actually scanning out. */
		if (plane_state->visible)
			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);

		drm_modeset_unlock_crtc(crtc);
	}
}
3118
/*
 * Quiesce the display before a GPU reset.  On platforms where the reset
 * clobbers the display (gen3/4 except g4x), take all modeset locks and
 * suspend the display; the locks are intentionally left held and are
 * released by intel_finish_reset() after the reset completes.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	/* no reset support for gen2 */
	if (IS_GEN2(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return;

	drm_modeset_lock_all(&dev_priv->drm);
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	intel_display_suspend(&dev_priv->drm);
}
3136
/*
 * Bring the display back up after a GPU reset.  Pairs with
 * intel_prepare_reset(): the early returns below mirror the early returns
 * there, so the modeset locks taken in prepare are dropped here only on
 * the full re-init path.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	/* no reset support for gen2 */
	if (IS_GEN2(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(&dev_priv->drm);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(&dev_priv->drm);

	/* hpd_irq_setup is protected by the irq spinlock */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(&dev_priv->drm);

	intel_hpd_init(dev_priv);

	/* Drops the locks taken in intel_prepare_reset(). */
	drm_modeset_unlock_all(&dev_priv->drm);
}
3185
3186 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3187 {
3188         struct drm_device *dev = crtc->dev;
3189         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3190         unsigned reset_counter;
3191         bool pending;
3192
3193         reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
3194         if (intel_crtc->reset_counter != reset_counter)
3195                 return false;
3196
3197         spin_lock_irq(&dev->event_lock);
3198         pending = to_intel_crtc(crtc)->flip_work != NULL;
3199         spin_unlock_irq(&dev->event_lock);
3200
3201         return pending;
3202 }
3203
/*
 * Apply a pipe source size change without a full modeset: reprogram
 * PIPESRC and update the panel fitter (pfit) to match the new state.
 * @old_crtc_state still holds the previous configuration, while the new
 * one is read from crtc->base.state.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC holds (width - 1) << 16 | (height - 1). */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3245
/*
 * Switch the FDI TX/RX of @crtc from a training pattern to the normal
 * link pattern, to be called once link training has completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* Clear-then-set of the train field keeps the code shape
		 * uniform even though the mask equals the "none" value. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3287
/*
 * The FDI link training function for ILK/Ibexpeak: program training
 * pattern 1 on TX and RX, poll FDI_RX_IIR for bit lock, then switch to
 * pattern 2 and poll for symbol lock.  Failures are logged but not
 * propagated to the caller.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; writing the status bit back clears it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3381
/*
 * FDI TX voltage-swing / pre-emphasis settings, tried in this order by
 * the SNB and IVB link training loops until the link locks.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3388
/*
 * The FDI link training function for SNB/Cougarpoint: like the ILK
 * version, but each training pattern is retried with up to four
 * voltage-swing/pre-emphasis levels from snb_b_fdi_train_param[].
 * Failures are logged but not propagated to the caller.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Try each vswing/pre-emphasis level, polling for bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Writing the status bit back clears it. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on success. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same per-level retry loop, now polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3521
/*
 * Manual link training for Ivy Bridge A0 parts: disable TX/RX first,
 * then retry the full pattern-1/pattern-2 sequence with each
 * vswing/pre-emphasis setting (twice each) until symbol lock is
 * achieved.  Note that if every attempt fails, control still falls
 * through to the train_done label, so "FDI train done." is logged
 * either way; per-attempt failures are logged at debug level only.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each setting is used for two consecutive attempts */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* Re-read in case the bit sets between the reads;
			 * writing the status bit back clears it. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3641
/*
 * Enable the FDI PLLs for @intel_crtc: PCH FDI RX PLL first, then switch
 * the RX clock from Rawclk to PCDclk, and finally the CPU FDI TX PLL.
 * The udelay()s between steps cover PLL warmup plus DMI latency.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* Mirror the pipe's bpc setting into the FDI RX bpc field. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3678
/*
 * Disable the FDI PLLs for @intel_crtc, reversing
 * ironlake_fdi_pll_enable(): switch RX back to Rawclk, disable the TX
 * PLL, then the RX PLL, with posting reads and delays to let the clocks
 * settle at each step.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3708
/*
 * Disable the FDI link on @crtc: turn off CPU FDI TX and PCH FDI RX,
 * apply the Ironlake clock-pointer workaround, and leave both sides
 * parked in training pattern 1 ready for the next link training.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* Refresh the RX bpc field (bits 16-18) from PIPECONF. */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3761
/*
 * Check whether any CRTC still has framebuffer unpin work outstanding.
 * Returns true for the first CRTC found with a non-zero unpin_work_count,
 * after optionally waiting one vblank to give a pending flip a chance to
 * retire first; returns false only if no CRTC has pending unpins.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}
3785
/*
 * Finish a page flip on @intel_crtc: detach the flip_work, deliver the
 * vblank event to userspace (if one was requested), drop the vblank
 * reference, wake waiters and queue the unpin work.
 *
 * NOTE(review): flip_work is read/written under dev->event_lock by the
 * visible caller (intel_crtc_wait_for_pending_flips); presumably all
 * callers hold that lock — verify before relying on it.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	/* Unpinning is deferred to the driver workqueue. */
	queue_work(dev_priv->wq, &work->unpin_work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3804
/*
 * Wait (interruptibly, up to 60 seconds) for any pending page flip on
 * @crtc to complete.  On timeout, a stuck CS flip is forcibly completed
 * with a warning (MMIO flips are left alone).  Returns 0 on success or
 * completion-forced, or a negative error if the wait was interrupted.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	/* ret == 0 means the wait timed out with the flip still pending. */
	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
3836
/*
 * Gate the iCLKIP pixel clock and disable the SSC modulator via the
 * sideband interface.  Counterpart to lpt_program_iclkip().
 */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Sideband (SBI) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
3851
/*
 * Program the iCLKIP clock to the pixel clock of @crtc's adjusted mode:
 * disable the modulator, compute the integer divisor / phase increment /
 * auxiliary divider from the 172.8 MHz virtual root clock, write them
 * through the sideband interface, re-enable the modulator, and ungate
 * the pixel clock.
 */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Modulator must be off while it is reprogrammed. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Sideband (SBI) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
3929
/*
 * Read the currently programmed iCLKIP frequency back from the PCH
 * SBI registers, in kHz.
 *
 * This is the inverse of lpt_program_iclkip(): the divsel/phaseinc/
 * auxdiv fields are recombined into the effective divisor and the
 * virtual root frequency is divided by it.  Returns 0 when the pixel
 * clock is gated or the SSC modulator is disabled, i.e. iCLKIP is not
 * in use.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;	/* 172.8 MHz root, in kHz precision */
	u32 iclk_pi_range = 64;		/* phase-increment steps per divisor step */
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated -> iCLKIP is not driving anything. */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	/* Sideband (SBI) accesses must be serialized via sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		/* Modulator disabled -> no iCLKIP clock to report. */
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Integer divisor select and fractional phase increment. */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	/* Auxiliary power-of-two post divider. */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Undo the divsel/phaseinc split performed when programming. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
3966
/*
 * Copy the CPU transcoder timing registers of @crtc into the PCH
 * transcoder @pch_transcoder, so both run with identical timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Horizontal timings. */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* Vertical timings, including the vsync shift. */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
3990
/*
 * Enable or disable FDI B/C lane sharing ("bifurcation") on CPT.
 *
 * No-op when the hardware already matches the requested state.  The
 * FDI B and C receivers must be disabled while the bit is flipped
 * (asserted via the WARN_ONs below).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* Already in the requested state? */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* The setting may only change while both receivers are off. */
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4011
4012 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4013 {
4014         struct drm_device *dev = intel_crtc->base.dev;
4015
4016         switch (intel_crtc->pipe) {
4017         case PIPE_A:
4018                 break;
4019         case PIPE_B:
4020                 if (intel_crtc->config->fdi_lanes > 2)
4021                         cpt_set_fdi_bc_bifurcation(dev, false);
4022                 else
4023                         cpt_set_fdi_bc_bifurcation(dev, true);
4024
4025                 break;
4026         case PIPE_C:
4027                 cpt_set_fdi_bc_bifurcation(dev, true);
4028
4029                 break;
4030         default:
4031                 BUG();
4032         }
4033 }
4034
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	/* Find the (e)DP encoder driving this crtc and report its port. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/*
	 * NOTE(review): -1 is squeezed through an enum port here; the
	 * caller in this file BUG()s on anything other than port B/C/D,
	 * so this value only ever signals "no DP encoder found".
	 */
	return -1;
}
4050
4051 /*
4052  * Enable PCH resources required for PCH ports:
4053  *   - PCH PLLs
4054  *   - FDI training & RX/TX
4055  *   - update transcoder timings
4056  *   - DP transcoding bits
4057  *   - transcoder
4058  */
4059 static void ironlake_pch_enable(struct drm_crtc *crtc)
4060 {
4061         struct drm_device *dev = crtc->dev;
4062         struct drm_i915_private *dev_priv = to_i915(dev);
4063         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4064         int pipe = intel_crtc->pipe;
4065         u32 temp;
4066
4067         assert_pch_transcoder_disabled(dev_priv, pipe);
4068
4069         if (IS_IVYBRIDGE(dev))
4070                 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4071
4072         /* Write the TU size bits before fdi link training, so that error
4073          * detection works. */
4074         I915_WRITE(FDI_RX_TUSIZE1(pipe),
4075                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4076
4077         /* For PCH output, training FDI link */
4078         dev_priv->display.fdi_link_train(crtc);
4079
4080         /* We need to program the right clock selection before writing the pixel
4081          * mutliplier into the DPLL. */
4082         if (HAS_PCH_CPT(dev)) {
4083                 u32 sel;
4084
4085                 temp = I915_READ(PCH_DPLL_SEL);
4086                 temp |= TRANS_DPLL_ENABLE(pipe);
4087                 sel = TRANS_DPLLB_SEL(pipe);
4088                 if (intel_crtc->config->shared_dpll ==
4089                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4090                         temp |= sel;
4091                 else
4092                         temp &= ~sel;
4093                 I915_WRITE(PCH_DPLL_SEL, temp);
4094         }
4095
4096         /* XXX: pch pll's can be enabled any time before we enable the PCH
4097          * transcoder, and we actually should do this to not upset any PCH
4098          * transcoder that already use the clock when we share it.
4099          *
4100          * Note that enable_shared_dpll tries to do the right thing, but
4101          * get_shared_dpll unconditionally resets the pll - we need that to have
4102          * the right LVDS enable sequence. */
4103         intel_enable_shared_dpll(intel_crtc);
4104
4105         /* set transcoder timing, panel must allow it */
4106         assert_panel_unlocked(dev_priv, pipe);
4107         ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4108
4109         intel_fdi_normal_train(crtc);
4110
4111         /* For PCH DP, enable TRANS_DP_CTL */
4112         if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
4113                 const struct drm_display_mode *adjusted_mode =
4114                         &intel_crtc->config->base.adjusted_mode;
4115                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4116                 i915_reg_t reg = TRANS_DP_CTL(pipe);
4117                 temp = I915_READ(reg);
4118                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4119                           TRANS_DP_SYNC_MASK |
4120                           TRANS_DP_BPC_MASK);
4121                 temp |= TRANS_DP_OUTPUT_ENABLE;
4122                 temp |= bpc << 9; /* same format but at 11:9 */
4123
4124                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4125                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4126                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4127                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4128
4129                 switch (intel_trans_dp_port_sel(crtc)) {
4130                 case PORT_B:
4131                         temp |= TRANS_DP_PORT_SEL_B;
4132                         break;
4133                 case PORT_C:
4134                         temp |= TRANS_DP_PORT_SEL_C;
4135                         break;
4136                 case PORT_D:
4137                         temp |= TRANS_DP_PORT_SEL_D;
4138                         break;
4139                 default:
4140                         BUG();
4141                 }
4142
4143                 I915_WRITE(reg, temp);
4144         }
4145
4146         ironlake_enable_pch_transcoder(dev_priv, pipe);
4147 }
4148
/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy the
 * CPU transcoder timings to the PCH transcoder, then enable it.  Only
 * the first PCH transcoder is used here, hence the hard-coded
 * TRANSCODER_A/PIPE_A.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4165
/*
 * Post-modeset sanity check on CPT: verify the pipe's scanline counter
 * is advancing, and complain loudly if the pipe appears stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once (another 5 ms) before declaring it stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4179
4180 static int
4181 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4182                   unsigned scaler_user, int *scaler_id, unsigned int rotation,
4183                   int src_w, int src_h, int dst_w, int dst_h)
4184 {
4185         struct intel_crtc_scaler_state *scaler_state =
4186                 &crtc_state->scaler_state;
4187         struct intel_crtc *intel_crtc =
4188                 to_intel_crtc(crtc_state->base.crtc);
4189         int need_scaling;
4190
4191         need_scaling = intel_rotation_90_or_270(rotation) ?
4192                 (src_h != dst_w || src_w != dst_h):
4193                 (src_w != dst_w || src_h != dst_h);
4194
4195         /*
4196          * if plane is being disabled or scaler is no more required or force detach
4197          *  - free scaler binded to this plane/crtc
4198          *  - in order to do this, update crtc->scaler_usage
4199          *
4200          * Here scaler state in crtc_state is set free so that
4201          * scaler can be assigned to other user. Actual register
4202          * update to free the scaler is done in plane/panel-fit programming.
4203          * For this purpose crtc/plane_state->scaler_id isn't reset here.
4204          */
4205         if (force_detach || !need_scaling) {
4206                 if (*scaler_id >= 0) {
4207                         scaler_state->scaler_users &= ~(1 << scaler_user);
4208                         scaler_state->scalers[*scaler_id].in_use = 0;
4209
4210                         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4211                                 "Staged freeing scaler id %d scaler_users = 0x%x\n",
4212                                 intel_crtc->pipe, scaler_user, *scaler_id,
4213                                 scaler_state->scaler_users);
4214                         *scaler_id = -1;
4215                 }
4216                 return 0;
4217         }
4218
4219         /* range checks */
4220         if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4221                 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4222
4223                 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4224                 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4225                 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4226                         "size is out of scaler range\n",
4227                         intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4228                 return -EINVAL;
4229         }
4230
4231         /* mark this plane as a scaler user in crtc_state */
4232         scaler_state->scaler_users |= (1 << scaler_user);
4233         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4234                 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4235                 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4236                 scaler_state->scaler_users);
4237
4238         return 0;
4239 }
4240
4241 /**
4242  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4243  *
4244  * @state: crtc's scaler state
4245  *
4246  * Return
4247  *     0 - scaler_usage updated successfully
4248  *    error - requested scaling cannot be supported or other error condition
4249  */
4250 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4251 {
4252         struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4253         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4254
4255         DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
4256                       intel_crtc->base.base.id, intel_crtc->base.name,
4257                       intel_crtc->pipe, SKL_CRTC_INDEX);
4258
4259         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4260                 &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4261                 state->pipe_src_w, state->pipe_src_h,
4262                 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4263 }
4264
4265 /**
4266  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4267  *
4268  * @state: crtc's scaler state
4269  * @plane_state: atomic plane state to update
4270  *
4271  * Return
4272  *     0 - scaler_usage updated successfully
4273  *    error - requested scaling cannot be supported or other error condition
4274  */
4275 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4276                                    struct intel_plane_state *plane_state)
4277 {
4278
4279         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4280         struct intel_plane *intel_plane =
4281                 to_intel_plane(plane_state->base.plane);
4282         struct drm_framebuffer *fb = plane_state->base.fb;
4283         int ret;
4284
4285         bool force_detach = !fb || !plane_state->visible;
4286
4287         DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
4288                       intel_plane->base.base.id, intel_plane->base.name,
4289                       intel_crtc->pipe, drm_plane_index(&intel_plane->base));
4290
4291         ret = skl_update_scaler(crtc_state, force_detach,
4292                                 drm_plane_index(&intel_plane->base),
4293                                 &plane_state->scaler_id,
4294                                 plane_state->base.rotation,
4295                                 drm_rect_width(&plane_state->src) >> 16,
4296                                 drm_rect_height(&plane_state->src) >> 16,
4297                                 drm_rect_width(&plane_state->dst),
4298                                 drm_rect_height(&plane_state->dst));
4299
4300         if (ret || plane_state->scaler_id < 0)
4301                 return ret;
4302
4303         /* check colorkey */
4304         if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4305                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4306                               intel_plane->base.base.id,
4307                               intel_plane->base.name);
4308                 return -EINVAL;
4309         }
4310
4311         /* Check src format */
4312         switch (fb->pixel_format) {
4313         case DRM_FORMAT_RGB565:
4314         case DRM_FORMAT_XBGR8888:
4315         case DRM_FORMAT_XRGB8888:
4316         case DRM_FORMAT_ABGR8888:
4317         case DRM_FORMAT_ARGB8888:
4318         case DRM_FORMAT_XRGB2101010:
4319         case DRM_FORMAT_XBGR2101010:
4320         case DRM_FORMAT_YUYV:
4321         case DRM_FORMAT_YVYU:
4322         case DRM_FORMAT_UYVY:
4323         case DRM_FORMAT_VYUY:
4324                 break;
4325         default:
4326                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4327                               intel_plane->base.base.id, intel_plane->base.name,
4328                               fb->base.id, fb->pixel_format);
4329                 return -EINVAL;
4330         }
4331
4332         return 0;
4333 }
4334
4335 static void skylake_scaler_disable(struct intel_crtc *crtc)
4336 {
4337         int i;
4338
4339         for (i = 0; i < crtc->num_scalers; i++)
4340                 skl_detach_scaler(crtc, i);
4341 }
4342
/*
 * Program the panel fitter on SKL-class hardware.  The pfit runs on a
 * pipe scaler, which must have been reserved earlier by
 * skl_update_scaler_crtc(); WARN and bail out if none was assigned.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A scaler must have been staged for the pfit already. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4370
/*
 * Program the fixed-function panel fitter on ILK-class hardware:
 * enable it and set the output window position/size from the staged
 * pch_pfit config.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4391
/*
 * Enable IPS for @crtc: via the pcode mailbox on Broadwell, or by
 * writing IPS_CTL directly on Haswell.  Requires the primary plane to
 * be enabled and a vblank to have passed since it was enabled (this is
 * called from post_plane_update, which runs after a vblank wait).
 * No-op if the staged config does not enable IPS.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		/* pcode mailbox accesses are serialized by rps.hw_lock. */
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4429
/*
 * Disable IPS for @crtc: via the pcode mailbox on Broadwell, or by
 * clearing IPS_CTL directly on Haswell, then wait a vblank so the
 * plane can be safely disabled afterwards.  No-op if IPS was not
 * enabled in the current config.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		/* pcode mailbox accesses are serialized by rps.hw_lock. */
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4456
/*
 * Switch off the legacy video overlay on @intel_crtc, if one exists.
 * The switch-off is done non-interruptibly (mm.interruptible is
 * temporarily cleared) under struct_mutex; any error is ignored.
 */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = to_i915(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
4474
4475 /**
4476  * intel_post_enable_primary - Perform operations after enabling primary plane
4477  * @crtc: the CRTC whose primary plane was just enabled
4478  *
4479  * Performs potentially sleeping operations that must be done after the primary
4480  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4481  * called due to an explicit primary plane update, or due to an implicit
4482  * re-enable that is caused when a sprite plane is updated to no longer
4483  * completely hide the primary plane.
4484  */
4485 static void
4486 intel_post_enable_primary(struct drm_crtc *crtc)
4487 {
4488         struct drm_device *dev = crtc->dev;
4489         struct drm_i915_private *dev_priv = to_i915(dev);
4490         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4491         int pipe = intel_crtc->pipe;
4492
4493         /*
4494          * FIXME IPS should be fine as long as one plane is
4495          * enabled, but in practice it seems to have problems
4496          * when going from primary only to sprite only and vice
4497          * versa.
4498          */
4499         hsw_enable_ips(intel_crtc);
4500
4501         /*
4502          * Gen2 reports pipe underruns whenever all planes are disabled.
4503          * So don't enable underrun reporting before at least some planes
4504          * are enabled.
4505          * FIXME: Need to fix the logic to work when we turn off all planes
4506          * but leave the pipe running.
4507          */
4508         if (IS_GEN2(dev))
4509                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4510
4511         /* Underruns don't always raise interrupts, so check manually. */
4512         intel_check_cpu_fifo_underruns(dev_priv);
4513         intel_check_pch_fifo_underruns(dev_priv);
4514 }
4515
/* FIXME move all this to pre_plane_update() with proper state tracking */
/*
 * Counterpart of intel_post_enable_primary(): operations that must run
 * before the primary plane is disabled (stop underrun reporting on
 * gen2, disable IPS).
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
4542
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic variant of the pre-disable path: run the common
 * pre-disable steps, then additionally drop out of memory self-refresh
 * on GMCH platforms so the plane disable actually latches.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}
}
4569
/*
 * Post-commit plane bookkeeping: flip frontbuffer tracking bits,
 * re-allow cxsr, update watermarks when staged, run FBC post-update,
 * and re-run the primary-plane enable hooks when the primary plane
 * just became visible (or a modeset made it visible).
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(dev, pipe_config->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	/* NULL when the primary plane was not part of this commit. */
	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		/* Primary turned (or stayed) on across a modeset, or
		 * transitioned from invisible to visible. */
		if (primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base);
	}
}
4602
4603 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4604 {
4605         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4606         struct drm_device *dev = crtc->base.dev;
4607         struct drm_i915_private *dev_priv = to_i915(dev);
4608         struct intel_crtc_state *pipe_config =
4609                 to_intel_crtc_state(crtc->base.state);
4610         struct drm_atomic_state *old_state = old_crtc_state->base.state;
4611         struct drm_plane *primary = crtc->base.primary;
4612         struct drm_plane_state *old_pri_state =
4613                 drm_atomic_get_existing_plane_state(old_state, primary);
4614         bool modeset = needs_modeset(&pipe_config->base);
4615
4616         if (old_pri_state) {
4617                 struct intel_plane_state *primary_state =
4618                         to_intel_plane_state(primary->state);
4619                 struct intel_plane_state *old_primary_state =
4620                         to_intel_plane_state(old_pri_state);
4621
4622                 intel_fbc_pre_update(crtc, pipe_config, primary_state);
4623
4624                 if (old_primary_state->visible &&
4625                     (modeset || !primary_state->visible))
4626                         intel_pre_disable_primary(&crtc->base);
4627         }
4628
4629         if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
4630                 crtc->wm.cxsr_allowed = false;
4631
4632                 /*
4633                  * Vblank time updates from the shadow to live plane control register
4634                  * are blocked if the memory self-refresh mode is active at that
4635                  * moment. So to make sure the plane gets truly disabled, disable
4636                  * first the self-refresh mode. The self-refresh enable bit in turn
4637                  * will be checked/applied by the HW only at the next frame start
4638                  * event which is after the vblank start event, so we need to have a
4639                  * wait-for-vblank between disabling the plane and the pipe.
4640                  */
4641                 if (old_crtc_state->base.active) {
4642                         intel_set_memory_cxsr(dev_priv, false);
4643                         dev_priv->wm.vlv.cxsr = false;
4644                         intel_wait_for_vblank(dev, crtc->pipe);
4645                 }
4646         }
4647
4648         /*
4649          * IVB workaround: must disable low power watermarks for at least
4650          * one frame before enabling scaling.  LP watermarks can be re-enabled
4651          * when scaling is disabled.
4652          *
4653          * WaCxSRDisabledForSpriteScaling:ivb
4654          */
4655         if (pipe_config->disable_lp_wm) {
4656                 ilk_disable_lp_wm(dev);
4657                 intel_wait_for_vblank(dev, crtc->pipe);
4658         }
4659
4660         /*
4661          * If we're doing a modeset, we're done.  No need to do any pre-vblank
4662          * watermark programming here.
4663          */
4664         if (needs_modeset(&pipe_config->base))
4665                 return;
4666
4667         /*
4668          * For platforms that support atomic watermarks, program the
4669          * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
4670          * will be the intermediate values that are safe for both pre- and
4671          * post- vblank; when vblank happens, the 'active' values will be set
4672          * to the final 'target' values and we'll do this again to get the
4673          * optimal watermarks.  For gen9+ platforms, the values we program here
4674          * will be the final target values which will get automatically latched
4675          * at vblank time; no further programming will be necessary.
4676          *
4677          * If a platform hasn't been transitioned to atomic watermarks yet,
4678          * we'll continue to update watermarks the old way, if flags tell
4679          * us to.
4680          */
4681         if (dev_priv->display.initial_watermarks != NULL)
4682                 dev_priv->display.initial_watermarks(pipe_config);
4683         else if (pipe_config->update_wm_pre)
4684                 intel_update_watermarks(&crtc->base);
4685 }
4686
4687 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4688 {
4689         struct drm_device *dev = crtc->dev;
4690         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4691         struct drm_plane *p;
4692         int pipe = intel_crtc->pipe;
4693
4694         intel_crtc_dpms_overlay_disable(intel_crtc);
4695
4696         drm_for_each_plane_mask(p, dev, plane_mask)
4697                 to_intel_plane(p)->disable_plane(p, crtc);
4698
4699         /*
4700          * FIXME: Once we grow proper nuclear flip support out of this we need
4701          * to compute the mask of flip planes precisely. For the time being
4702          * consider this a flip to a NULL plane.
4703          */
4704         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4705 }
4706
/*
 * Enable a pipe on ILK/SNB/IVB class hardware (FDI/PCH era).
 *
 * The statement order below is significant: timings/M-N/pipeconf are
 * programmed first, encoders get their pre_enable hooks, FDI PLL is
 * brought up before the CPU pipe, the LUT is loaded with clocks running
 * but the pipe still off, and only then is the pipe (and optionally the
 * PCH transcoder) enabled.  FIFO underrun reporting is suppressed around
 * the whole sequence because spurious underruns are expected (see below).
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	/* Enabling an already-active pipe is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* FDI link M/N values are only relevant when driving a PCH port. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/* Program safe watermarks before the pipe starts scanning out. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	/* Re-arm underrun reporting now that the pipe is stable. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4797
4798 /* IPS only exists on ULT machines and is tied to pipe A. */
4799 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4800 {
4801         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4802 }
4803
/*
 * Enable a pipe on HSW/BDW/SKL+ class (DDI) hardware.
 *
 * Many steps are conditional on the transcoder type: DSI transcoders skip
 * pipe timings/pipeconf/pipe-clock/transcoder-func programming, and the
 * EDP transcoder has no pixel multiplier register.  The ordering is
 * significant and mirrors the enable path: PLLs and clocks first, then
 * encoder pre_enable, FDI training (LPT PCH only), scalers/pfit, LUT,
 * watermarks, and finally the pipe, PCH and encoders proper.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	/* Enabling an already-active pipe is a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/* On LPT the PCH transcoder is always transcoder A. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	/* The EDP transcoder has no pipe multiplier register. */
	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	/* FDI link M/N values are only relevant when driving a PCH port. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* Suppress expected underruns only on the PCH-driven path. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(intel_crtc);

	/* gen9+ uses the unified scaler block instead of the pfit. */
	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(crtc);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(crtc);

	/* Program safe watermarks before the pipe starts scanning out. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(pipe_config);
	else
		intel_update_watermarks(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/*
	 * Two vblank waits before underrun reporting is re-armed —
	 * presumably to let the PCH path settle; TODO confirm exact reason.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
4927
4928 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4929 {
4930         struct drm_device *dev = crtc->base.dev;
4931         struct drm_i915_private *dev_priv = to_i915(dev);
4932         int pipe = crtc->pipe;
4933
4934         /* To avoid upsetting the power well on haswell only disable the pfit if
4935          * it's in use. The hw state code will make sure we get this right. */
4936         if (force || crtc->config->pch_pfit.enabled) {
4937                 I915_WRITE(PF_CTL(pipe), 0);
4938                 I915_WRITE(PF_WIN_POS(pipe), 0);
4939                 I915_WRITE(PF_WIN_SZ(pipe), 0);
4940         }
4941 }
4942
/*
 * Disable a pipe on ILK/SNB/IVB class hardware, mirroring
 * ironlake_crtc_enable() in reverse: encoders first, then vblanks,
 * pipe, pfit, FDI, post_disable hooks and finally the PCH transcoder
 * and FDI PLL.  Underrun reporting is suppressed for the duration.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* CPT needs additional transcoder/DPLL routing cleanup. */
		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm underrun reporting now that everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5005
/*
 * Disable a pipe on HSW/BDW/SKL+ class (DDI) hardware, mirroring
 * haswell_crtc_enable() in reverse.  DSI transcoders skip the
 * pipe/transcoder/pipe-clock steps, matching the enable path.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* On LPT the PCH transcoder is always transcoder A. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* gen9+ uses the unified scaler block instead of the pfit. */
	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		/* Re-arm PCH underrun reporting once FDI is fully down. */
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}
}
5057
5058 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5059 {
5060         struct drm_device *dev = crtc->base.dev;
5061         struct drm_i915_private *dev_priv = to_i915(dev);
5062         struct intel_crtc_state *pipe_config = crtc->config;
5063
5064         if (!pipe_config->gmch_pfit.control)
5065                 return;
5066
5067         /*
5068          * The panel fitter should only be adjusted whilst the pipe is disabled,
5069          * according to register description and PRM.
5070          */
5071         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5072         assert_pipe_disabled(dev_priv, crtc->pipe);
5073
5074         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5075         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5076
5077         /* Border color in case we don't scale up to the full screen. Black by
5078          * default, change to something else for debugging. */
5079         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5080 }
5081
5082 static enum intel_display_power_domain port_to_power_domain(enum port port)
5083 {
5084         switch (port) {
5085         case PORT_A:
5086                 return POWER_DOMAIN_PORT_DDI_A_LANES;
5087         case PORT_B:
5088                 return POWER_DOMAIN_PORT_DDI_B_LANES;
5089         case PORT_C:
5090                 return POWER_DOMAIN_PORT_DDI_C_LANES;
5091         case PORT_D:
5092                 return POWER_DOMAIN_PORT_DDI_D_LANES;
5093         case PORT_E:
5094                 return POWER_DOMAIN_PORT_DDI_E_LANES;
5095         default:
5096                 MISSING_CASE(port);
5097                 return POWER_DOMAIN_PORT_OTHER;
5098         }
5099 }
5100
5101 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5102 {
5103         switch (port) {
5104         case PORT_A:
5105                 return POWER_DOMAIN_AUX_A;
5106         case PORT_B:
5107                 return POWER_DOMAIN_AUX_B;
5108         case PORT_C:
5109                 return POWER_DOMAIN_AUX_C;
5110         case PORT_D:
5111                 return POWER_DOMAIN_AUX_D;
5112         case PORT_E:
5113                 /* FIXME: Check VBT for actual wiring of PORT E */
5114                 return POWER_DOMAIN_AUX_D;
5115         default:
5116                 MISSING_CASE(port);
5117                 return POWER_DOMAIN_AUX_A;
5118         }
5119 }
5120
/*
 * Return the power domain needed to drive @intel_encoder's port
 * (the lane domain, not the AUX domain — see the _aux_ variant below).
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through: UNKNOWN is handled like the digital ports */
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams borrow the primary digital port's domain. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5147
/*
 * Return the power domain needed for AUX channel transactions on
 * @intel_encoder's port.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through: treat like a regular DP/eDP port */
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams borrow the primary digital port's domain. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5177
/*
 * Compute the bitmask of power domains required by @crtc_state:
 * the pipe and transcoder domains, the panel fitter (when in use or
 * forced through), every attached encoder's port domain, and the
 * shared-PLL domain.  Returns 0 for an inactive CRTC.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Each attached encoder contributes its port's power domain. */
	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT(intel_display_port_power_domain(intel_encoder));
	}

	if (crtc_state->shared_dpll)
		mask |= BIT(POWER_DOMAIN_PLLS);

	return mask;
}
5208
5209 static unsigned long
5210 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5211                                struct intel_crtc_state *crtc_state)
5212 {
5213         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5214         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5215         enum intel_display_power_domain domain;
5216         unsigned long domains, new_domains, old_domains;
5217
5218         old_domains = intel_crtc->enabled_power_domains;
5219         intel_crtc->enabled_power_domains = new_domains =
5220                 get_crtc_power_domains(crtc, crtc_state);
5221
5222         domains = new_domains & ~old_domains;
5223
5224         for_each_power_domain(domain, domains)
5225                 intel_display_power_get(dev_priv, domain);
5226
5227         return old_domains & ~new_domains;
5228 }
5229
5230 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5231                                       unsigned long domains)
5232 {
5233         enum intel_display_power_domain domain;
5234
5235         for_each_power_domain(domain, domains)
5236                 intel_display_power_put(dev_priv, domain);
5237 }
5238
5239 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5240 {
5241         int max_cdclk_freq = dev_priv->max_cdclk_freq;
5242
5243         if (INTEL_INFO(dev_priv)->gen >= 9 ||
5244             IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5245                 return max_cdclk_freq;
5246         else if (IS_CHERRYVIEW(dev_priv))
5247                 return max_cdclk_freq*95/100;
5248         else if (INTEL_INFO(dev_priv)->gen < 4)
5249                 return 2*max_cdclk_freq*90/100;
5250         else
5251                 return max_cdclk_freq*90/100;
5252 }
5253
5254 static int skl_calc_cdclk(int max_pixclk, int vco);
5255
/*
 * Determine and cache the platform's maximum cdclk frequency in
 * dev_priv->max_cdclk_freq (reading fuses/straps where applicable),
 * then derive dev_priv->max_dotclk_freq from it.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* SKL/KBL fuse off the cdclk limit in the DFSM register. */
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
		int max_cdclk, vco;

		vco = dev_priv->skl_preferred_vco_freq;
		WARN_ON(vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROXTON(dev)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5316
/*
 * Read back the current cdclk frequency from the hardware into
 * dev_priv->cdclk_freq, and reprogram the VLV/CHV GMBus clock divider
 * which depends on it.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
				 dev_priv->cdclk_pll.ref);
	else
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
				 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
5340
/*
 * Convert a cdclk frequency in kHz to the hardware's .1 fixpoint MHz
 * encoding, which carries a -1 MHz offset.
 */
static int skl_cdclk_decimal(int cdclk)
{
	int khz_above_one_mhz = cdclk - 1000;

	return DIV_ROUND_CLOSEST(khz_above_one_mhz, 500);
}
5346
/*
 * Return the DE PLL VCO frequency required to produce @cdclk on BXT,
 * or 0 when @cdclk equals the reference clock (PLL bypassed).
 */
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

	/* cdclk == ref means the PLL is not needed at all. */
	if (cdclk == dev_priv->cdclk_pll.ref)
		return 0;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		/* fall through: use the common ratio for unknown values */
	case 144000:
	case 288000:
	case 384000:
	case 576000:
		ratio = 60;
		break;
	case 624000:
		ratio = 65;
		break;
	}

	return dev_priv->cdclk_pll.ref * ratio;
}
5370
/*
 * Disable the BXT DE PLL and wait for it to report unlock, then clear
 * the cached VCO so later code knows the PLL is off.
 */
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
				    1))
		DRM_ERROR("timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk_pll.vco = 0;
}
5383
/*
 * Program the BXT DE PLL ratio for the requested @vco (in kHz), enable
 * the PLL and wait for lock, then cache the new VCO frequency.
 */
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	/* Ratio is VCO expressed in multiples of the reference clock. */
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
	u32 val;

	val = I915_READ(BXT_DE_PLL_CTL);
	val &= ~BXT_DE_PLL_RATIO_MASK;
	val |= BXT_DE_PLL_RATIO(ratio);
	I915_WRITE(BXT_DE_PLL_CTL, val);

	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE,
				    BXT_DE_PLL_LOCK,
				    BXT_DE_PLL_LOCK,
				    1))
		DRM_ERROR("timeout waiting for DE PLL lock\n");

	dev_priv->cdclk_pll.vco = vco;
}
5406
/*
 * Program the BXT cdclk to @cdclk (kHz): select the CD2X divider,
 * notify the PCU before and after the change, and cycle the DE PLL
 * if the required vco differs from the currently running one.
 */
static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
{
	u32 val, divider;
	int vco, ret;

	vco = bxt_de_pll_vco(dev_priv, cdclk);

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	case 8:
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
		break;
	case 4:
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
		break;
	case 3:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
		break;
	case 2:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	default:
		/* Only the bypass case (vco == 0, cdclk == ref) lands here. */
		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
		WARN_ON(vco != 0);

		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	}

	/* Inform power controller of upcoming frequency change */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      0x80000000);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	/* Changing vco requires a full PLL disable + enable cycle. */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_enable(dev_priv, vco);

	val = divider | skl_cdclk_decimal(cdclk);
	/*
	 * FIXME if only the cd2x divider needs changing, it could be done
	 * without shutting off the pipe (if only one pipe is active).
	 */
	val |= BXT_CDCLK_CD2X_PIPE_NONE;
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	I915_WRITE(CDCLK_CTL, val);

	/* Tell the PCU the new frequency, in units of 25 MHz. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      DIV_ROUND_UP(cdclk, 25000));
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	/* Refresh the cached cdclk values from the hardware. */
	intel_update_cdclk(&dev_priv->drm);
}
5484
/*
 * Verify the cdclk state left by the pre-OS (BIOS/GOP) on BXT.  If the
 * DE PLL is off, in bypass, or CDCLK_CTL does not match what we would
 * have programmed, invalidate the cached state to force reprogramming.
 */
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	intel_update_cdclk(&dev_priv->drm);

	/* PLL off or running in bypass: needs full reprogramming. */
	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;

	/* Rebuild the value we would have written ourselves. */
	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
		   skl_cdclk_decimal(dev_priv->cdclk_freq);
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (dev_priv->cdclk_freq >= 500000)
		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;

	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
5531
5532 void bxt_init_cdclk(struct drm_i915_private *dev_priv)
5533 {
5534         bxt_sanitize_cdclk(dev_priv);
5535
5536         if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
5537                 return;
5538
5539         /*
5540          * FIXME:
5541          * - The initial CDCLK needs to be read from VBT.
5542          *   Need to make this change after VBT has changes for BXT.
5543          */
5544         bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
5545 }
5546
/* Switch cdclk back to the reference clock (bypass), shutting off the DE PLL. */
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
}
5551
/*
 * Pick the lowest SKL cdclk (kHz) that can drive @max_pixclk, constrained
 * to the divider set available with the given DPLL0 @vco (8640 MHz vs the
 * default 8100 MHz family).
 */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		/* 8640 MHz vco: 617.143 / 540 / 432 / 308.571 MHz bins. */
		if (max_pixclk > 540000)
			return 617143;
		if (max_pixclk > 432000)
			return 540000;
		if (max_pixclk > 308571)
			return 432000;
		return 308571;
	}

	/* 8100 MHz vco: 675 / 540 / 450 / 337.5 MHz bins. */
	if (max_pixclk > 540000)
		return 675000;
	if (max_pixclk > 450000)
		return 540000;
	if (max_pixclk > 337500)
		return 450000;
	return 337500;
}
5574
/*
 * Read back DPLL0 state and derive the current cdclk PLL vco (kHz).
 * Leaves vco == 0 when DPLL0 is disabled, unlocked, or in a mode we
 * do not expect for cdclk generation.
 */
static void
skl_dpll0_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 24000;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
		return;

	val = I915_READ(DPLL_CTRL1);

	/* DPLL0 is expected in DP mode with only the override bit set. */
	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
			    DPLL_CTRL1_SSC(SKL_DPLL0) |
			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	/* Map the programmed DP link rate back to the vco it implies. */
	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}
5614
5615 void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
5616 {
5617         bool changed = dev_priv->skl_preferred_vco_freq != vco;
5618
5619         dev_priv->skl_preferred_vco_freq = vco;
5620
5621         if (changed)
5622                 intel_update_max_cdclk(&dev_priv->drm);
5623 }
5624
/*
 * Enable DPLL0 at @vco (8100000 or 8640000 kHz).  The cdclk is first
 * parked at the lowest frequency for the vco, then the link rate is
 * programmed and the PLL enabled.
 */
static void
skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	int min_cdclk = skl_calc_cdclk(0, vco);
	u32 val;

	/* DPLL0 only supports these two vco frequencies. */
	WARN_ON(vco != 8100000 && vco != 8640000);

	/* select the minimum CDCLK before enabling DPLL 0 */
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
	I915_WRITE(CDCLK_CTL, val);
	POSTING_READ(CDCLK_CTL);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	if (vco == 8640000)
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					    SKL_DPLL0);
	else
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					    SKL_DPLL0);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);

	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	/* Poll up to 5 ms for the PLL to lock. */
	if (intel_wait_for_register(dev_priv,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("DPLL0 not locked\n");

	dev_priv->cdclk_pll.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}
5674
/* Disable DPLL0 and wait (up to 1 ms) for its lock bit to clear. */
static void
skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (intel_wait_for_register(dev_priv,
				   LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
				   1))
		DRM_ERROR("Couldn't disable DPLL0\n");

	/* Record the PLL as off so skl_set_cdclk() knows to re-enable it. */
	dev_priv->cdclk_pll.vco = 0;
}
5686
/*
 * Ask the PCU whether a cdclk change may proceed.  Returns true once
 * the PCU acknowledges with SKL_CDCLK_READY_FOR_CHANGE.
 */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* inform PCU we want to change CDCLK */
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* The read must succeed AND the ready bit must be set. */
	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
5700
5701 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5702 {
5703         unsigned int i;
5704
5705         for (i = 0; i < 15; i++) {
5706                 if (skl_cdclk_pcu_ready(dev_priv))
5707                         return true;
5708                 udelay(10);
5709         }
5710
5711         return false;
5712 }
5713
/*
 * Program the SKL cdclk to @cdclk (kHz) generated from a DPLL0 running
 * at @vco: wait for PCU permission, select the CDCLK_CTL frequency,
 * cycle DPLL0 if the vco changes, then acknowledge the PCU.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 freq_select, pcu_ack;

	/* cdclk == 24000 (the reference) iff DPLL0 is to be off (vco == 0). */
	WARN_ON((cdclk == 24000) != (vco == 0));

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch (cdclk) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308571:
	case 337500:
	default:
		/* The bypass frequency (24000) intentionally lands here too. */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617143:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	/* Changing vco requires a full DPLL0 disable + enable cycle. */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5769
5770 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
5771
/* Park cdclk at the 24 MHz reference and shut off DPLL0 (vco == 0). */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}
5776
5777 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5778 {
5779         int cdclk, vco;
5780
5781         skl_sanitize_cdclk(dev_priv);
5782
5783         if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
5784                 /*
5785                  * Use the current vco as our initial
5786                  * guess as to what the preferred vco is.
5787                  */
5788                 if (dev_priv->skl_preferred_vco_freq == 0)
5789                         skl_set_preferred_cdclk_vco(dev_priv,
5790                                                     dev_priv->cdclk_pll.vco);
5791                 return;
5792         }
5793
5794         vco = dev_priv->skl_preferred_vco_freq;
5795         if (vco == 0)
5796                 vco = 8100000;
5797         cdclk = skl_calc_cdclk(0, vco);
5798
5799         skl_set_cdclk(dev_priv, cdclk, vco);
5800 }
5801
/*
 * Verify the cdclk state left by the pre-OS on SKL and invalidate the
 * cached state (forcing reprogramming) if anything looks inconsistent.
 */
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t cdctl, expected;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(&dev_priv->drm);
	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk_freq);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
5841
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	/* The cached cdclk must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* Select the Punit voltage level for the target frequency. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new voltage level and wait for the Punit to apply it. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
5907
/*
 * Program the CHV cdclk to @cdclk (kHz) by writing the CCK divider to
 * the Punit; only the four frequencies listed below are supported.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	/* The cached cdclk must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5948
5949 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5950                                  int max_pixclk)
5951 {
5952         int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5953         int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5954
5955         /*
5956          * Really only a few cases to deal with, as only 4 CDclks are supported:
5957          *   200MHz
5958          *   267MHz
5959          *   320/333MHz (depends on HPLL freq)
5960          *   400MHz (VLV only)
5961          * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5962          * of the lower bin and adjust if needed.
5963          *
5964          * We seem to get an unstable or solid color picture at 200MHz.
5965          * Not sure what's wrong. For now use 200MHz only when all pipes
5966          * are off.
5967          */
5968         if (!IS_CHERRYVIEW(dev_priv) &&
5969             max_pixclk > freq_320*limit/100)
5970                 return 400000;
5971         else if (max_pixclk > 266667*limit/100)
5972                 return freq_320;
5973         else if (max_pixclk > 0)
5974                 return 266667;
5975         else
5976                 return 200000;
5977 }
5978
/* Pick the lowest BXT cdclk bin (kHz) able to drive @max_pixclk. */
static int bxt_calc_cdclk(int max_pixclk)
{
	/* Supported cdclk frequencies, ascending; each bin covers pixel
	 * clocks up to and including its own frequency. */
	static const int freqs[] = {
		144000, 288000, 384000, 576000, 624000,
	};
	int n = sizeof(freqs) / sizeof(freqs[0]);
	int i;

	for (i = n - 1; i > 0; i--) {
		if (max_pixclk > freqs[i - 1])
			return freqs[i];
	}

	return freqs[0];
}
5992
5993 /* Compute the max pixel clock for new configuration. */
5994 static int intel_mode_max_pixclk(struct drm_device *dev,
5995                                  struct drm_atomic_state *state)
5996 {
5997         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5998         struct drm_i915_private *dev_priv = to_i915(dev);
5999         struct drm_crtc *crtc;
6000         struct drm_crtc_state *crtc_state;
6001         unsigned max_pixclk = 0, i;
6002         enum pipe pipe;
6003
6004         memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
6005                sizeof(intel_state->min_pixclk));
6006
6007         for_each_crtc_in_state(state, crtc, crtc_state, i) {
6008                 int pixclk = 0;
6009
6010                 if (crtc_state->enable)
6011                         pixclk = crtc_state->adjusted_mode.crtc_clock;
6012
6013                 intel_state->min_pixclk[i] = pixclk;
6014         }
6015
6016         for_each_pipe(dev_priv, pipe)
6017                 max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
6018
6019         return max_pixclk;
6020 }
6021
6022 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6023 {
6024         struct drm_device *dev = state->dev;
6025         struct drm_i915_private *dev_priv = to_i915(dev);
6026         int max_pixclk = intel_mode_max_pixclk(dev, state);
6027         struct intel_atomic_state *intel_state =
6028                 to_intel_atomic_state(state);
6029
6030         intel_state->cdclk = intel_state->dev_cdclk =
6031                 valleyview_calc_cdclk(dev_priv, max_pixclk);
6032
6033         if (!intel_state->active_crtcs)
6034                 intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
6035
6036         return 0;
6037 }
6038
6039 static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
6040 {
6041         int max_pixclk = ilk_max_pixel_rate(state);
6042         struct intel_atomic_state *intel_state =
6043                 to_intel_atomic_state(state);
6044
6045         intel_state->cdclk = intel_state->dev_cdclk =
6046                 bxt_calc_cdclk(max_pixclk);
6047
6048         if (!intel_state->active_crtcs)
6049                 intel_state->dev_cdclk = bxt_calc_cdclk(0);
6050
6051         return 0;
6052 }
6053
6054 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6055 {
6056         unsigned int credits, default_credits;
6057
6058         if (IS_CHERRYVIEW(dev_priv))
6059                 default_credits = PFI_CREDIT(12);
6060         else
6061                 default_credits = PFI_CREDIT(8);
6062
6063         if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6064                 /* CHV suggested value is 31 or 63 */
6065                 if (IS_CHERRYVIEW(dev_priv))
6066                         credits = PFI_CREDIT_63;
6067                 else
6068                         credits = PFI_CREDIT(15);
6069         } else {
6070                 credits = default_credits;
6071         }
6072
6073         /*
6074          * WA - write default credits before re-programming
6075          * FIXME: should we also set the resend bit here?
6076          */
6077         I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6078                    default_credits);
6079
6080         I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6081                    credits | PFI_CREDIT_RESEND);
6082
6083         /*
6084          * FIXME is this guaranteed to clear
6085          * immediately or should we poll for it?
6086          */
6087         WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6088 }
6089
/*
 * Commit the cdclk computed during the check phase on VLV/CHV, then
 * reprogram the PFI credits to match the new frequency.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6118
/*
 * Full crtc enable sequence for VLV/CHV: pipe timings, pipeconf, PLL
 * bring-up and encoder hooks, in the hardware-mandated order.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* CHV pipe B: force legacy blending and a zeroed canvas color. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = to_i915(dev);

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Encoder hooks that must run before the PLL is enabled. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (IS_CHERRYVIEW(dev)) {
		chv_prepare_pll(intel_crtc, intel_crtc->config);
		chv_enable_pll(intel_crtc, intel_crtc->config);
	} else {
		vlv_prepare_pll(intel_crtc, intel_crtc->config);
		vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6180
/* Write the precomputed FP0/FP1 PLL divisor values for the crtc's pipe. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
6189
/*
 * Full crtc enable sequence for gen2-4 style platforms: PLL dividers,
 * pipe timings, pipeconf, PLL enable and encoder hooks, in order.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6237
/* Turn off the GMCH panel fitter, if it was in use for this crtc. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Nothing to do if the panel fitter was never enabled. */
	if (!crtc->config->gmch_pfit.control)
		return;

	/* The pfit may only be reprogrammed while the pipe is off. */
	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6252
/*
 * Disable sequence for gmch (pre-ILK / VLV / CHV) pipes: encoders,
 * vblanks, pipe, panel fitter, then the DPLL — the inverse order of
 * the enable path.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev))
		intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/*
	 * The pipe PLL is left alone for DSI outputs (DSI presumably
	 * manages its own PLL — NOTE(review): confirm against DSI code).
	 */
	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	/* Gen2 has no FIFO underrun reporting to turn off. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6298
/*
 * Forcibly shut down a crtc outside of a normal atomic commit (used to
 * sanitize state the hardware was left in), then fix up all software
 * state tracking — crtc/connector/encoder masks, power domains and the
 * active-crtc bookkeeping — to match the now-disabled hardware.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	/* Tear down the primary plane first if it is still scanning out. */
	if (to_intel_plane_state(crtc->primary->state)->visible) {
		WARN_ON(intel_crtc->flip_work);

		intel_pre_disable_primary_noatomic(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Clear the tracked mode and all enable/link state on the crtc. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	/* Detach every encoder that was routed to this crtc. */
	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Drop the power domain references the enabled crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
6346
6347 /*
6348  * turn all crtc's off, but do not adjust state
6349  * This has to be paired with a call to intel_modeset_setup_hw_state.
6350  */
6351 int intel_display_suspend(struct drm_device *dev)
6352 {
6353         struct drm_i915_private *dev_priv = to_i915(dev);
6354         struct drm_atomic_state *state;
6355         int ret;
6356
6357         state = drm_atomic_helper_suspend(dev);
6358         ret = PTR_ERR_OR_ZERO(state);
6359         if (ret)
6360                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6361         else
6362                 dev_priv->modeset_restore_state = state;
6363         return ret;
6364 }
6365
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Clean up the base object, then free the containing structure. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6373
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		/* An enabled connector must be wired to an active crtc. */
		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors share an encoder; skip the 1:1 checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Disabled connector: crtc must be inactive, encoder unset. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6412
6413 int intel_connector_init(struct intel_connector *connector)
6414 {
6415         drm_atomic_helper_connector_reset(&connector->base);
6416
6417         if (!connector->base.state)
6418                 return -ENOMEM;
6419
6420         return 0;
6421 }
6422
6423 struct intel_connector *intel_connector_alloc(void)
6424 {
6425         struct intel_connector *connector;
6426
6427         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6428         if (!connector)
6429                 return NULL;
6430
6431         if (intel_connector_init(connector) < 0) {
6432                 kfree(connector);
6433                 return NULL;
6434         }
6435
6436         return connector;
6437 }
6438
/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	/* Pipe output only needed by the encoder hook; value is discarded. */
	enum pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}
6449
6450 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6451 {
6452         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6453                 return crtc_state->fdi_lanes;
6454
6455         return 0;
6456 }
6457
/*
 * Validate the FDI lane configuration for a pipe against the platform
 * limits, including the lane budget shared between pipes B and C on
 * three-pipe parts.  Returns 0 if the config fits, -EINVAL if not, or
 * a state-acquisition error.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum on any platform. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap out at 2 lanes, with no sharing concerns. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no inter-pipe lane sharing to check. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* Pipe B wants >2 lanes: pipe C must not use FDI at all. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C itself can never use more than 2 lanes. */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ...and pipe B must leave lanes for pipe C to use. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6528
/* Sentinel return value asking the caller to recompute the config. */
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	/*
	 * If the lane config doesn't fit, retry with reduced bpp (in
	 * steps of 2 bits per component) down to a floor of 6 bpc.
	 */
	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* bpp changed: the caller must redo the whole config computation. */
	if (needs_recompute)
		return RETRY;

	return ret;
}
6574
/*
 * Whether the given pipe config can run with IPS (Intermediate Pixel
 * Storage) enabled: bpp must be at most 24, and on non-HSW platforms
 * the pixel rate must leave cdclk headroom.
 */
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
				     struct intel_crtc_state *pipe_config)
{
	if (pipe_config->pipe_bpp > 24)
		return false;

	/* HSW can handle pixel rate up to cdclk? */
	if (IS_HASWELL(dev_priv))
		return true;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	return ilk_pipe_pixel_rate(pipe_config) <=
		dev_priv->max_cdclk_freq * 95 / 100;
}
6595
6596 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6597                                    struct intel_crtc_state *pipe_config)
6598 {
6599         struct drm_device *dev = crtc->base.dev;
6600         struct drm_i915_private *dev_priv = to_i915(dev);
6601
6602         pipe_config->ips_enabled = i915.enable_ips &&
6603                 hsw_crtc_supports_ips(crtc) &&
6604                 pipe_config_supports_ips(dev_priv, pipe_config);
6605 }
6606
6607 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6608 {
6609         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6610
6611         /* GDG double wide on either pipe, otherwise pipe A only */
6612         return INTEL_INFO(dev_priv)->gen < 4 &&
6613                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6614 }
6615
/*
 * Platform-independent part of crtc config computation: dotclock
 * limits (incl. double wide), pipe source width alignment, hsync
 * workarounds, IPS and FDI.  Returns 0, RETRY, or a negative error.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_INFO(dev)->gen < 4) {
		/* In single wide mode the dotclk is capped at 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/* FDI validation may return RETRY to request a recompute. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6670
/*
 * Read back the current cdclk frequency (in kHz) on Skylake.  The
 * CDCLK_CTL decode depends on which VCO frequency DPLL0 is running at.
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl;

	skl_dpll0_update(dev_priv);

	/* DPLL0 off: cdclk runs off the reference clock. */
	if (dev_priv->cdclk_pll.vco == 0)
		return dev_priv->cdclk_pll.ref;

	cdctl = I915_READ(CDCLK_CTL);

	if (dev_priv->cdclk_pll.vco == 8640000) {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308571;
		case CDCLK_FREQ_540:
			return 540000;
		case CDCLK_FREQ_675_617:
			return 617143;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
		}
	} else {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_540:
			return 540000;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
		}
	}

	/* Unknown select: fall back to the reference clock. */
	return dev_priv->cdclk_pll.ref;
}
6713
/*
 * Refresh the cached Broxton DE PLL state (19.2 MHz reference; vco is
 * left at 0 when the PLL is disabled or not reporting lock).
 */
static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 19200;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(BXT_DE_PLL_ENABLE);
	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
		return;

	/* Enabled but not locked shouldn't happen; keep vco at 0. */
	if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
		return;

	/* VCO = ratio * reference clock. */
	val = I915_READ(BXT_DE_PLL_CTL);
	dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
		dev_priv->cdclk_pll.ref;
}
6732
/*
 * Read back the current cdclk frequency (in kHz) on Broxton:
 * cdclk = DE PLL VCO / (2 * CD2X divider).
 */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 divider;
	int div, vco;

	bxt_de_pll_update(dev_priv);

	/* PLL off: cdclk runs off the reference clock. */
	vco = dev_priv->cdclk_pll.vco;
	if (vco == 0)
		return dev_priv->cdclk_pll.ref;

	divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;

	/* div is twice the CD2X divider (1, 1.5, 2, 4). */
	switch (divider) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		div = 2;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		div = 3;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		div = 4;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		div = 8;
		break;
	default:
		MISSING_CASE(divider);
		return dev_priv->cdclk_pll.ref;
	}

	return DIV_ROUND_CLOSEST(vco, div);
}
6767
/*
 * Read back the current cdclk frequency (in kHz) on Broadwell from
 * LCPLL_CTL and the fuse straps.
 */
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		return 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		return 337500;
	else
		return 675000;
}
6787
/*
 * Read back the current cdclk frequency (in kHz) on Haswell.  Unlike
 * BDW, the non-450 frequency is implied by the SKU (ULT vs. not)
 * rather than a dedicated frequency field.
 */
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (IS_HSW_ULT(dev))
		return 337500;
	else
		return 540000;
}
6805
/* VLV/CHV: cdclk is reported by the CCK display clock control. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
				      CCK_DISPLAY_CLOCK_CONTROL);
}
6811
/* Ironlake: fixed 450 MHz display clock. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6816
/* i945: fixed 400 MHz display clock. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
6821
/* i915: fixed 333.33 MHz display clock. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
6826
/* Misc i9xx parts: fixed 200 MHz display clock. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
6831
/*
 * Pineview: decode the display core clock (in kHz) from the GCFGC
 * PCI config register.
 */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through - treat unknown encodings as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}
6855
6856 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6857 {
6858         u16 gcfgc = 0;
6859
6860         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6861
6862         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6863                 return 133333;
6864         else {
6865                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6866                 case GC_DISPLAY_CLOCK_333_MHZ:
6867                         return 333333;
6868                 default:
6869                 case GC_DISPLAY_CLOCK_190_200_MHZ:
6870                         return 190000;
6871                 }
6872         }
6873 }
6874
/* i865: fixed 266.67 MHz display clock. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
6879
/*
 * i85x: decode the display clock (in kHz) from the HPLLCC register
 * read through PCI config space of device 0 function 3.
 */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (dev->pdev->revision == 0x1)
		return 133333;

	pci_bus_read_config_word(dev->pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}
6916
/* i830: fixed 133.33 MHz display clock. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}
6921
/*
 * Decode the HPLL VCO frequency (in kHz) from the HPLLVCO strap
 * register.  The 3-bit encoding differs per chipset family, hence the
 * per-family tables; a zero entry marks an unknown/invalid encoding.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev))
		vco_table = ctg_vco;
	else if (IS_G4X(dev))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	/* Mobile parts expose the strap at a different offset. */
	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}
6990
/*
 * GM45: derive cdclk (in kHz) from the HPLL VCO and the single-bit
 * clock select in GCFGC.
 */
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		return cdclk_sel ? 333333 : 222222;
	case 3200000:
		return cdclk_sel ? 320000 : 228571;
	default:
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
		return 222222;
	}
}
7012
/*
 * i965GM: cdclk (in kHz) = HPLL VCO / divider, where the divider is
 * picked from a per-VCO table indexed by the GCFGC select field.
 */
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
	static const uint8_t div_3200[] = { 16, 10,  8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}
7049
7050 static int g33_get_display_clock_speed(struct drm_device *dev)
7051 {
7052         static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7053         static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7054         static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7055         static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7056         const uint8_t *div_table;
7057         unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7058         uint16_t tmp = 0;
7059
7060         pci_read_config_word(dev->pdev, GCFGC, &tmp);
7061
7062         cdclk_sel = (tmp >> 4) & 0x7;
7063
7064         if (cdclk_sel >= ARRAY_SIZE(div_3200))
7065                 goto fail;
7066
7067         switch (vco) {
7068         case 3200000:
7069                 div_table = div_3200;
7070                 break;
7071         case 4000000:
7072                 div_table = div_4000;
7073                 break;
7074         case 4800000:
7075                 div_table = div_4800;
7076                 break;
7077         case 5333333:
7078                 div_table = div_5333;
7079                 break;
7080         default:
7081                 goto fail;
7082         }
7083
7084         return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7085
7086 fail:
7087         DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7088         return 190476;
7089 }
7090
7091 static void
7092 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7093 {
7094         while (*num > DATA_LINK_M_N_MASK ||
7095                *den > DATA_LINK_M_N_MASK) {
7096                 *num >>= 1;
7097                 *den >>= 1;
7098         }
7099 }
7100
/*
 * Compute an M/N register pair approximating the ratio m/n: N is the
 * smaller of roundup_pow_of_two(n) and DATA_LINK_N_MAX, M is scaled to
 * match, and both are then reduced to fit DATA_LINK_M_N_MASK.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	/* 64-bit intermediate so m * N cannot overflow. */
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
7108
7109 void
7110 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7111                        int pixel_clock, int link_clock,
7112                        struct intel_link_m_n *m_n)
7113 {
7114         m_n->tu = 64;
7115
7116         compute_m_n(bits_per_pixel * pixel_clock,
7117                     link_clock * nlanes * 8,
7118                     &m_n->gmch_m, &m_n->gmch_n);
7119
7120         compute_m_n(pixel_clock, link_clock,
7121                     &m_n->link_m, &m_n->link_n);
7122 }
7123
7124 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7125 {
7126         if (i915.panel_use_ssc >= 0)
7127                 return i915.panel_use_ssc != 0;
7128         return dev_priv->vbt.lvds_use_ssc
7129                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7130 }
7131
7132 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7133 {
7134         return (1 << dpll->n) << 16 | dpll->m2;
7135 }
7136
7137 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7138 {
7139         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7140 }
7141
7142 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7143                                      struct intel_crtc_state *crtc_state,
7144                                      struct dpll *reduced_clock)
7145 {
7146         struct drm_device *dev = crtc->base.dev;
7147         u32 fp, fp2 = 0;
7148
7149         if (IS_PINEVIEW(dev)) {
7150                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7151                 if (reduced_clock)
7152                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7153         } else {
7154                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7155                 if (reduced_clock)
7156                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7157         }
7158
7159         crtc_state->dpll_hw_state.fp0 = fp;
7160
7161         crtc->lowfreq_avail = false;
7162         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7163             reduced_clock) {
7164                 crtc_state->dpll_hw_state.fp1 = fp2;
7165                 crtc->lowfreq_avail = true;
7166         } else {
7167                 crtc_state->dpll_hw_state.fp1 = fp;
7168         }
7169 }
7170
/*
 * Recalibrate the PLL B opamp through the DPIO sideband.
 *
 * The magic values below come from the eDP/HDMI DPIO VBIOS notes; they
 * are written as an ordered read-modify-write sequence and must not be
 * reordered.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): this plain assignment discards the masked value read
	 * above — presumably intentional per the VBIOS notes, but confirm. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7199
/*
 * Program the PCH transcoder data/link M1/N1 dividers for @crtc's pipe.
 * The DATA_M1 register also carries the transfer-unit size.
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7212
/*
 * Program the CPU transcoder data/link M/N dividers.
 *
 * Gen5+ uses per-transcoder registers and optionally the M2/N2 set for
 * DRRS; older (g4x-style) hardware uses per-pipe registers and has no
 * M2/N2 set.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
			crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7246
7247 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7248 {
7249         struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7250
7251         if (m_n == M1_N1) {
7252                 dp_m_n = &crtc->config->dp_m_n;
7253                 dp_m2_n2 = &crtc->config->dp_m2_n2;
7254         } else if (m_n == M2_N2) {
7255
7256                 /*
7257                  * M2_N2 registers are not supported. Hence m2_n2 divider value
7258                  * needs to be programmed into M1_N1.
7259                  */
7260                 dp_m_n = &crtc->config->dp_m2_n2;
7261         } else {
7262                 DRM_ERROR("Unsupported divider value\n");
7263                 return;
7264         }
7265
7266         if (crtc->config->has_pch_encoder)
7267                 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7268         else
7269                 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7270 }
7271
7272 static void vlv_compute_dpll(struct intel_crtc *crtc,
7273                              struct intel_crtc_state *pipe_config)
7274 {
7275         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7276                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7277         if (crtc->pipe != PIPE_A)
7278                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7279
7280         /* DPLL not used with DSI, but still need the rest set up */
7281         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7282                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7283                         DPLL_EXT_BUFFER_ENABLE_VLV;
7284
7285         pipe_config->dpll_hw_state.dpll_md =
7286                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7287 }
7288
7289 static void chv_compute_dpll(struct intel_crtc *crtc,
7290                              struct intel_crtc_state *pipe_config)
7291 {
7292         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7293                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7294         if (crtc->pipe != PIPE_A)
7295                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7296
7297         /* DPLL not used with DSI, but still need the rest set up */
7298         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7299                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7300
7301         pipe_config->dpll_hw_state.dpll_md =
7302                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7303 }
7304
7305 static void vlv_prepare_pll(struct intel_crtc *crtc,
7306                             const struct intel_crtc_state *pipe_config)
7307 {
7308         struct drm_device *dev = crtc->base.dev;
7309         struct drm_i915_private *dev_priv = to_i915(dev);
7310         enum pipe pipe = crtc->pipe;
7311         u32 mdiv;
7312         u32 bestn, bestm1, bestm2, bestp1, bestp2;
7313         u32 coreclk, reg_val;
7314
7315         /* Enable Refclk */
7316         I915_WRITE(DPLL(pipe),
7317                    pipe_config->dpll_hw_state.dpll &
7318                    ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7319
7320         /* No need to actually set up the DPLL with DSI */
7321         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7322                 return;
7323
7324         mutex_lock(&dev_priv->sb_lock);
7325
7326         bestn = pipe_config->dpll.n;
7327         bestm1 = pipe_config->dpll.m1;
7328         bestm2 = pipe_config->dpll.m2;
7329         bestp1 = pipe_config->dpll.p1;
7330         bestp2 = pipe_config->dpll.p2;
7331
7332         /* See eDP HDMI DPIO driver vbios notes doc */
7333
7334         /* PLL B needs special handling */
7335         if (pipe == PIPE_B)
7336                 vlv_pllb_recal_opamp(dev_priv, pipe);
7337
7338         /* Set up Tx target for periodic Rcomp update */
7339         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7340
7341         /* Disable target IRef on PLL */
7342         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7343         reg_val &= 0x00ffffff;
7344         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7345
7346         /* Disable fast lock */
7347         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7348
7349         /* Set idtafcrecal before PLL is enabled */
7350         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7351         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7352         mdiv |= ((bestn << DPIO_N_SHIFT));
7353         mdiv |= (1 << DPIO_K_SHIFT);
7354
7355         /*
7356          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7357          * but we don't support that).
7358          * Note: don't use the DAC post divider as it seems unstable.
7359          */
7360         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7361         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7362
7363         mdiv |= DPIO_ENABLE_CALIBRATION;
7364         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7365
7366         /* Set HBR and RBR LPF coefficients */
7367         if (pipe_config->port_clock == 162000 ||
7368             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
7369             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
7370                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7371                                  0x009f0003);
7372         else
7373                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7374                                  0x00d0000f);
7375
7376         if (intel_crtc_has_dp_encoder(pipe_config)) {
7377                 /* Use SSC source */
7378                 if (pipe == PIPE_A)
7379                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7380                                          0x0df40000);
7381                 else
7382                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7383                                          0x0df70000);
7384         } else { /* HDMI or VGA */
7385                 /* Use bend source */
7386                 if (pipe == PIPE_A)
7387                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7388                                          0x0df70000);
7389                 else
7390                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7391                                          0x0df40000);
7392         }
7393
7394         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7395         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7396         if (intel_crtc_has_dp_encoder(crtc->config))
7397                 coreclk |= 0x01000000;
7398         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7399
7400         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7401         mutex_unlock(&dev_priv->sb_lock);
7402 }
7403
/*
 * Program the CHV PHY PLL dividers and loop filter for @crtc via the
 * DPIO sideband, based on the precomputed state in @pipe_config.
 * The register sequence is order-sensitive and must not be rearranged.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 packs a 22-bit fractional part below the integer part. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* Coarse lock detect only when there is no fractional part. */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	/* Coefficients and tribuf count are picked per VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7508
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 *
 * Returns: 0 on success, -ENOMEM if the temporary state allocation fails.
 */
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	struct intel_crtc_state *pipe_config;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	/* Build a minimal transient state; only the PLL fields are used. */
	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}
7548
/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. Counterpart of vlv_force_pll_on(); to be
 * used after the PLL was enabled even though @pipe itself was not.
 */
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}
7564
7565 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7566                               struct intel_crtc_state *crtc_state,
7567                               struct dpll *reduced_clock)
7568 {
7569         struct drm_device *dev = crtc->base.dev;
7570         struct drm_i915_private *dev_priv = to_i915(dev);
7571         u32 dpll;
7572         struct dpll *clock = &crtc_state->dpll;
7573
7574         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7575
7576         dpll = DPLL_VGA_MODE_DIS;
7577
7578         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7579                 dpll |= DPLLB_MODE_LVDS;
7580         else
7581                 dpll |= DPLLB_MODE_DAC_SERIAL;
7582
7583         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7584                 dpll |= (crtc_state->pixel_multiplier - 1)
7585                         << SDVO_MULTIPLIER_SHIFT_HIRES;
7586         }
7587
7588         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7589             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7590                 dpll |= DPLL_SDVO_HIGH_SPEED;
7591
7592         if (intel_crtc_has_dp_encoder(crtc_state))
7593                 dpll |= DPLL_SDVO_HIGH_SPEED;
7594
7595         /* compute bitmask from p1 value */
7596         if (IS_PINEVIEW(dev))
7597                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7598         else {
7599                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7600                 if (IS_G4X(dev) && reduced_clock)
7601                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7602         }
7603         switch (clock->p2) {
7604         case 5:
7605                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7606                 break;
7607         case 7:
7608                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7609                 break;
7610         case 10:
7611                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7612                 break;
7613         case 14:
7614                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7615                 break;
7616         }
7617         if (INTEL_INFO(dev)->gen >= 4)
7618                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7619
7620         if (crtc_state->sdvo_tv_clock)
7621                 dpll |= PLL_REF_INPUT_TVCLKINBC;
7622         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7623                  intel_panel_use_ssc(dev_priv))
7624                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7625         else
7626                 dpll |= PLL_REF_INPUT_DREFCLK;
7627
7628         dpll |= DPLL_VCO_ENABLE;
7629         crtc_state->dpll_hw_state.dpll = dpll;
7630
7631         if (INTEL_INFO(dev)->gen >= 4) {
7632                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7633                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7634                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7635         }
7636 }
7637
7638 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7639                               struct intel_crtc_state *crtc_state,
7640                               struct dpll *reduced_clock)
7641 {
7642         struct drm_device *dev = crtc->base.dev;
7643         struct drm_i915_private *dev_priv = to_i915(dev);
7644         u32 dpll;
7645         struct dpll *clock = &crtc_state->dpll;
7646
7647         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7648
7649         dpll = DPLL_VGA_MODE_DIS;
7650
7651         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7652                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7653         } else {
7654                 if (clock->p1 == 2)
7655                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7656                 else
7657                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7658                 if (clock->p2 == 4)
7659                         dpll |= PLL_P2_DIVIDE_BY_4;
7660         }
7661
7662         if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7663                 dpll |= DPLL_DVO_2X_MODE;
7664
7665         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7666             intel_panel_use_ssc(dev_priv))
7667                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7668         else
7669                 dpll |= PLL_REF_INPUT_DREFCLK;
7670
7671         dpll |= DPLL_VCO_ENABLE;
7672         crtc_state->dpll_hw_state.dpll = dpll;
7673 }
7674
/*
 * Write the CRTC timing registers (H/V TOTAL, BLANK, SYNC and VSYNCSHIFT)
 * for the current adjusted mode. All registers pack "start - 1" in the
 * low word and "end - 1" in the high word.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7736
7737 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7738 {
7739         struct drm_device *dev = intel_crtc->base.dev;
7740         struct drm_i915_private *dev_priv = to_i915(dev);
7741         enum pipe pipe = intel_crtc->pipe;
7742
7743         /* pipesrc controls the size that is scaled from, which should
7744          * always be the user's requested size.
7745          */
7746         I915_WRITE(PIPESRC(pipe),
7747                    ((intel_crtc->config->pipe_src_w - 1) << 16) |
7748                    (intel_crtc->config->pipe_src_h - 1));
7749 }
7750
7751 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7752                                    struct intel_crtc_state *pipe_config)
7753 {
7754         struct drm_device *dev = crtc->base.dev;
7755         struct drm_i915_private *dev_priv = to_i915(dev);
7756         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7757         uint32_t tmp;
7758
7759         tmp = I915_READ(HTOTAL(cpu_transcoder));
7760         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7761         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7762         tmp = I915_READ(HBLANK(cpu_transcoder));
7763         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7764         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7765         tmp = I915_READ(HSYNC(cpu_transcoder));
7766         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7767         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7768
7769         tmp = I915_READ(VTOTAL(cpu_transcoder));
7770         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7771         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7772         tmp = I915_READ(VBLANK(cpu_transcoder));
7773         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7774         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7775         tmp = I915_READ(VSYNC(cpu_transcoder));
7776         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7777         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7778
7779         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7780                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7781                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7782                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7783         }
7784 }
7785
7786 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7787                                     struct intel_crtc_state *pipe_config)
7788 {
7789         struct drm_device *dev = crtc->base.dev;
7790         struct drm_i915_private *dev_priv = to_i915(dev);
7791         u32 tmp;
7792
7793         tmp = I915_READ(PIPESRC(crtc->pipe));
7794         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7795         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7796
7797         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7798         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7799 }
7800
7801 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7802                                  struct intel_crtc_state *pipe_config)
7803 {
7804         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7805         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7806         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7807         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7808
7809         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7810         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7811         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7812         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7813
7814         mode->flags = pipe_config->base.adjusted_mode.flags;
7815         mode->type = DRM_MODE_TYPE_DRIVER;
7816
7817         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7818         mode->flags |= pipe_config->base.adjusted_mode.flags;
7819
7820         mode->hsync = drm_mode_hsync(mode);
7821         mode->vrefresh = drm_mode_vrefresh(mode);
7822         drm_mode_set_name(mode);
7823 }
7824
/*
 * Program the PIPECONF register for this pipe from the already-computed
 * crtc state: quirk-forced enable bit, double wide mode, bpc/dither
 * (g4x and later only), CxSR downclocking, interlace mode and limited
 * color range (VLV/CHV).
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/*
	 * Preserve the current enable bit on pipes the PIPEA/PIPEB force
	 * quirks say must never be turned off.
	 */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Pre-gen4 hardware and SDVO outputs get the field-indication
		 * interlace variant; everything else uses sync shift. */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7888
7889 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7890                                    struct intel_crtc_state *crtc_state)
7891 {
7892         struct drm_device *dev = crtc->base.dev;
7893         struct drm_i915_private *dev_priv = to_i915(dev);
7894         const struct intel_limit *limit;
7895         int refclk = 48000;
7896
7897         memset(&crtc_state->dpll_hw_state, 0,
7898                sizeof(crtc_state->dpll_hw_state));
7899
7900         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7901                 if (intel_panel_use_ssc(dev_priv)) {
7902                         refclk = dev_priv->vbt.lvds_ssc_freq;
7903                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7904                 }
7905
7906                 limit = &intel_limits_i8xx_lvds;
7907         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7908                 limit = &intel_limits_i8xx_dvo;
7909         } else {
7910                 limit = &intel_limits_i8xx_dac;
7911         }
7912
7913         if (!crtc_state->clock_set &&
7914             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7915                                  refclk, NULL, &crtc_state->dpll)) {
7916                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7917                 return -EINVAL;
7918         }
7919
7920         i8xx_compute_dpll(crtc, crtc_state, NULL);
7921
7922         return 0;
7923 }
7924
7925 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7926                                   struct intel_crtc_state *crtc_state)
7927 {
7928         struct drm_device *dev = crtc->base.dev;
7929         struct drm_i915_private *dev_priv = to_i915(dev);
7930         const struct intel_limit *limit;
7931         int refclk = 96000;
7932
7933         memset(&crtc_state->dpll_hw_state, 0,
7934                sizeof(crtc_state->dpll_hw_state));
7935
7936         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7937                 if (intel_panel_use_ssc(dev_priv)) {
7938                         refclk = dev_priv->vbt.lvds_ssc_freq;
7939                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7940                 }
7941
7942                 if (intel_is_dual_link_lvds(dev))
7943                         limit = &intel_limits_g4x_dual_channel_lvds;
7944                 else
7945                         limit = &intel_limits_g4x_single_channel_lvds;
7946         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7947                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7948                 limit = &intel_limits_g4x_hdmi;
7949         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7950                 limit = &intel_limits_g4x_sdvo;
7951         } else {
7952                 /* The option is for other outputs */
7953                 limit = &intel_limits_i9xx_sdvo;
7954         }
7955
7956         if (!crtc_state->clock_set &&
7957             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7958                                 refclk, NULL, &crtc_state->dpll)) {
7959                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7960                 return -EINVAL;
7961         }
7962
7963         i9xx_compute_dpll(crtc, crtc_state, NULL);
7964
7965         return 0;
7966 }
7967
7968 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7969                                   struct intel_crtc_state *crtc_state)
7970 {
7971         struct drm_device *dev = crtc->base.dev;
7972         struct drm_i915_private *dev_priv = to_i915(dev);
7973         const struct intel_limit *limit;
7974         int refclk = 96000;
7975
7976         memset(&crtc_state->dpll_hw_state, 0,
7977                sizeof(crtc_state->dpll_hw_state));
7978
7979         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7980                 if (intel_panel_use_ssc(dev_priv)) {
7981                         refclk = dev_priv->vbt.lvds_ssc_freq;
7982                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7983                 }
7984
7985                 limit = &intel_limits_pineview_lvds;
7986         } else {
7987                 limit = &intel_limits_pineview_sdvo;
7988         }
7989
7990         if (!crtc_state->clock_set &&
7991             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7992                                 refclk, NULL, &crtc_state->dpll)) {
7993                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7994                 return -EINVAL;
7995         }
7996
7997         i9xx_compute_dpll(crtc, crtc_state, NULL);
7998
7999         return 0;
8000 }
8001
8002 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8003                                    struct intel_crtc_state *crtc_state)
8004 {
8005         struct drm_device *dev = crtc->base.dev;
8006         struct drm_i915_private *dev_priv = to_i915(dev);
8007         const struct intel_limit *limit;
8008         int refclk = 96000;
8009
8010         memset(&crtc_state->dpll_hw_state, 0,
8011                sizeof(crtc_state->dpll_hw_state));
8012
8013         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8014                 if (intel_panel_use_ssc(dev_priv)) {
8015                         refclk = dev_priv->vbt.lvds_ssc_freq;
8016                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8017                 }
8018
8019                 limit = &intel_limits_i9xx_lvds;
8020         } else {
8021                 limit = &intel_limits_i9xx_sdvo;
8022         }
8023
8024         if (!crtc_state->clock_set &&
8025             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8026                                  refclk, NULL, &crtc_state->dpll)) {
8027                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8028                 return -EINVAL;
8029         }
8030
8031         i9xx_compute_dpll(crtc, crtc_state, NULL);
8032
8033         return 0;
8034 }
8035
8036 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8037                                   struct intel_crtc_state *crtc_state)
8038 {
8039         int refclk = 100000;
8040         const struct intel_limit *limit = &intel_limits_chv;
8041
8042         memset(&crtc_state->dpll_hw_state, 0,
8043                sizeof(crtc_state->dpll_hw_state));
8044
8045         if (!crtc_state->clock_set &&
8046             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8047                                 refclk, NULL, &crtc_state->dpll)) {
8048                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8049                 return -EINVAL;
8050         }
8051
8052         chv_compute_dpll(crtc, crtc_state);
8053
8054         return 0;
8055 }
8056
8057 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8058                                   struct intel_crtc_state *crtc_state)
8059 {
8060         int refclk = 100000;
8061         const struct intel_limit *limit = &intel_limits_vlv;
8062
8063         memset(&crtc_state->dpll_hw_state, 0,
8064                sizeof(crtc_state->dpll_hw_state));
8065
8066         if (!crtc_state->clock_set &&
8067             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8068                                 refclk, NULL, &crtc_state->dpll)) {
8069                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8070                 return -EINVAL;
8071         }
8072
8073         vlv_compute_dpll(crtc, crtc_state);
8074
8075         return 0;
8076 }
8077
/*
 * Read out the gmch panel fitter state into @pipe_config, but only if
 * the fitter is enabled and actually attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	/* i830 and non-mobile gen2/3 parts are skipped entirely. */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* Pre-gen4 only pipe B can own the fitter. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* Gen4+ record the owning pipe in PFIT_CONTROL itself. */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
8104
/*
 * Recover the port clock of a VLV pipe by reading the PLL divider word
 * back over the DPIO sideband and recomputing the frequency from it.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband (DPIO) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the m1/m2/n/p1/p2 dividers from the PLL DW3 word. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8131
/*
 * Read the currently-programmed primary plane state back from the
 * hardware and build an intel_initial_plane_config describing it
 * (tiling, format, base address, stride, size).
 *
 * NOTE(review): presumably used to inherit the firmware/BIOS-programmed
 * framebuffer at driver load — confirm against callers.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to read out if the plane is off. */
	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling is only read out on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Surface base/offset registers differ between gen4+ and older hw. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (width - 1) / (height - 1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8200
/*
 * Recover the port clock of a CHV pipe by reading the PLL divider
 * registers back over the DPIO sideband and recomputing the frequency.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband (DPIO) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/* m2 combines the integer part with the optional fractional part. */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8234
/*
 * Read out the full hardware state of a gmch pipe into @pipe_config.
 *
 * Returns true if the pipe is powered and enabled (state was read),
 * false otherwise. Holds a power domain reference for the duration of
 * the register reads.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* Bail early if the pipe's power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/VLV/CHV encode the pipe bpc in PIPECONF. */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier location depends on the hardware generation. */
	if (INTEL_INFO(dev)->gen >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Recompute the port clock from the readout DPLL state. */
	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
8347
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs: pick the nonspread source (CK505 vs internal),
 * and enable/disable the SSC source and CPU eDP output depending on
 * which panels are present. The final register value is computed first;
 * if it differs from the current one the sources are switched in the
 * careful multi-step order the code below implements.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Only port-A eDP is driven off the CPU source. */
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC running for DPLLs that already depend on it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the register already matches the target. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise updates above must converge on the precomputed value. */
	BUG_ON(val != final);
}
8515
/*
 * Pulse the FDI mPHY IOSFSB reset: assert the reset control bit in
 * SOUTH_CHICKEN2, wait for the status bit to latch, then de-assert and
 * wait for it to clear again. Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* Up to 100 us for the status bit to reflect the assert. */
	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	/* And up to 100 us for it to clear on de-assert. */
	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8536
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY registers over the SBI MPHY sideband.
 *
 * The register offsets and bit values below are opaque workaround
 * magic taken as-is from the WaMPhyProgramming:hsw sequence; do not
 * "clean them up" or reorder them. NOTE(review): verify any change
 * against the current Bspec workaround list.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8611
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	/* Sanitize inconsistent parameter combinations: FDI always needs
	 * downspread, and the LP variant of the LPT PCH has no FDI. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Take the SSC block out of DISABLE but keep the clock path
	 * gated (PATHALT) while it settles. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Ungate the path so the spread clock reaches CLKOUT_DP. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable config bit lives in a different SBI register
	 * on LP vs non-LP PCH variants. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8656
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the buffer-enable config bit first; the register differs
	 * between LP and non-LP PCH variants. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* If the SSC block is still running, gate the clock path (PATHALT,
	 * with a settling delay) before switching the block off. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8683
/* Map a bend amount in units of 5 steps (-50..+50) to an index 0..20
 * into the sscdivintphase[] table below. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI_SSCDIVINTPHASE low-word values for each supported CLKOUT_DP bend
 * amount, indexed via BEND_IDX(). Adjacent 5-step entries share a value;
 * the finer 5-step granularity comes from the dither setting programmed
 * separately in lpt_bend_clkout_dp(). */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8709
8710 /*
8711  * Bend CLKOUT_DP
8712  * steps -50 to 50 inclusive, in steps of 5
8713  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8714  * change in clock period = -(steps / 10) * 5.787 ps
8715  */
8716 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8717 {
8718         uint32_t tmp;
8719         int idx = BEND_IDX(steps);
8720
8721         if (WARN_ON(steps % 5 != 0))
8722                 return;
8723
8724         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8725                 return;
8726
8727         mutex_lock(&dev_priv->sb_lock);
8728
8729         if (steps % 10 != 0)
8730                 tmp = 0xAAAAAAAB;
8731         else
8732                 tmp = 0x00000000;
8733         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8734
8735         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8736         tmp &= 0xffff0000;
8737         tmp |= sscdivintphase[idx];
8738         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8739
8740         mutex_unlock(&dev_priv->sb_lock);
8741 }
8742
8743 #undef BEND_IDX
8744
8745 static void lpt_init_pch_refclk(struct drm_device *dev)
8746 {
8747         struct intel_encoder *encoder;
8748         bool has_vga = false;
8749
8750         for_each_intel_encoder(dev, encoder) {
8751                 switch (encoder->type) {
8752                 case INTEL_OUTPUT_ANALOG:
8753                         has_vga = true;
8754                         break;
8755                 default:
8756                         break;
8757                 }
8758         }
8759
8760         if (has_vga) {
8761                 lpt_bend_clkout_dp(to_i915(dev), 0);
8762                 lpt_enable_clkout_dp(dev, true, true);
8763         } else {
8764                 lpt_disable_clkout_dp(dev);
8765         }
8766 }
8767
8768 /*
8769  * Initialize reference clocks when the driver loads
8770  */
8771 void intel_init_pch_refclk(struct drm_device *dev)
8772 {
8773         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8774                 ironlake_init_pch_refclk(dev);
8775         else if (HAS_PCH_LPT(dev))
8776                 lpt_init_pch_refclk(dev);
8777 }
8778
8779 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8780 {
8781         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8782         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8783         int pipe = intel_crtc->pipe;
8784         uint32_t val;
8785
8786         val = 0;
8787
8788         switch (intel_crtc->config->pipe_bpp) {
8789         case 18:
8790                 val |= PIPECONF_6BPC;
8791                 break;
8792         case 24:
8793                 val |= PIPECONF_8BPC;
8794                 break;
8795         case 30:
8796                 val |= PIPECONF_10BPC;
8797                 break;
8798         case 36:
8799                 val |= PIPECONF_12BPC;
8800                 break;
8801         default:
8802                 /* Case prevented by intel_choose_pipe_bpp_dither. */
8803                 BUG();
8804         }
8805
8806         if (intel_crtc->config->dither)
8807                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8808
8809         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8810                 val |= PIPECONF_INTERLACED_ILK;
8811         else
8812                 val |= PIPECONF_PROGRESSIVE;
8813
8814         if (intel_crtc->config->limited_color_range)
8815                 val |= PIPECONF_COLOR_RANGE_SELECT;
8816
8817         I915_WRITE(PIPECONF(pipe), val);
8818         POSTING_READ(PIPECONF(pipe));
8819 }
8820
8821 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8822 {
8823         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8824         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8825         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8826         u32 val = 0;
8827
8828         if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8829                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8830
8831         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8832                 val |= PIPECONF_INTERLACED_ILK;
8833         else
8834                 val |= PIPECONF_PROGRESSIVE;
8835
8836         I915_WRITE(PIPECONF(cpu_transcoder), val);
8837         POSTING_READ(PIPECONF(cpu_transcoder));
8838 }
8839
8840 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8841 {
8842         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8843         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8844
8845         if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8846                 u32 val = 0;
8847
8848                 switch (intel_crtc->config->pipe_bpp) {
8849                 case 18:
8850                         val |= PIPEMISC_DITHER_6_BPC;
8851                         break;
8852                 case 24:
8853                         val |= PIPEMISC_DITHER_8_BPC;
8854                         break;
8855                 case 30:
8856                         val |= PIPEMISC_DITHER_10_BPC;
8857                         break;
8858                 case 36:
8859                         val |= PIPEMISC_DITHER_12_BPC;
8860                         break;
8861                 default:
8862                         /* Case prevented by pipe_config_set_bpp. */
8863                         BUG();
8864                 }
8865
8866                 if (intel_crtc->config->dither)
8867                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8868
8869                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8870         }
8871 }
8872
/* Compute the number of FDI lanes needed to carry target_clock (kHz)
 * at the given bpp over a link running at link_bw (kHz). */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bytes = link_bw * 8;

	/* Round up: a partially used lane must still be enabled. */
	return (bps + lane_bytes - 1) / lane_bytes;
}
8883
8884 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8885 {
8886         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8887 }
8888
/* Compute the DPLL and FP0/FP1 register values for an ILK-style PCH PLL
 * and store them in crtc_state->dpll_hw_state for later programming. */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* fp2 holds the reduced (power-saving) clock dividers; fall back
	 * to the normal value when no reduced clock was computed. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* Both SDVO/HDMI and DP outputs require the high-speed clock. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* Encode the p2 post divider; only these four values are produced
	 * by the divider search. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels may run off the spread-spectrum reference; everything
	 * else uses the default reference clock. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8972
/* Find PLL dividers for the mode and reserve a shared PCH PLL for the
 * pipe. Returns 0 on success, -EINVAL when no dividers or no free PLL
 * can be found. */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	bool has_reduced_clock = false;	/* never set here; reduced clock path currently unused */
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;	/* default PCH reference clock, kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Pick divider limits based on output type, LVDS link config and
	 * the (possibly SSC-overridden) reference clock. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Skip the divider search if dividers were already provided. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	/* Dead branch while has_reduced_clock stays false (see above). */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}
9038
/* Read back the PCH transcoder's link M1/N1 and data M1/N1 values; the
 * TU size is packed into the upper bits of the data M register. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9054
/* Read back the CPU transcoder's link/data M/N values. On gen5+ the
 * per-transcoder registers are used (optionally including the M2/N2
 * set); older hardware uses the per-pipe G4X registers. m2_n2 may be
 * NULL when the caller does not need the second set. */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9096
/* Read back the DP link M/N values, choosing the PCH or CPU transcoder
 * registers depending on where the DP encoder lives. */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}
9107
/* Read back the FDI link M/N values from the CPU transcoder; the M2/N2
 * set is not used for FDI, hence the NULL. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9114
9115 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9116                                     struct intel_crtc_state *pipe_config)
9117 {
9118         struct drm_device *dev = crtc->base.dev;
9119         struct drm_i915_private *dev_priv = to_i915(dev);
9120         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9121         uint32_t ps_ctrl = 0;
9122         int id = -1;
9123         int i;
9124
9125         /* find scaler attached to this pipe */
9126         for (i = 0; i < crtc->num_scalers; i++) {
9127                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9128                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9129                         id = i;
9130                         pipe_config->pch_pfit.enabled = true;
9131                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9132                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9133                         break;
9134                 }
9135         }
9136
9137         scaler_state->scaler_id = id;
9138         if (id >= 0) {
9139                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9140         } else {
9141                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9142         }
9143 }
9144
/* Read back the state of the primary plane on SKL+ and reconstruct an
 * intel_framebuffer describing the firmware/BIOS-provided scanout
 * buffer. On any failure the partially built fb is freed and
 * plane_config is left without an fb. */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Nothing to take over if the plane is off. */
	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	/* Decode pixel format from the plane control register. */
	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Decode the tiling mode into a framebuffer modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface base address is 4K aligned; mask off the low bits. */
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE encodes (height - 1) << 16 | (width - 1). */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units that depend on the tiling mode. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* NOTE(review): this relies on base being the first member of
	 * struct intel_framebuffer so kfree(fb) frees intel_fb — confirm. */
	kfree(fb);
}
9228
9229 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9230                                      struct intel_crtc_state *pipe_config)
9231 {
9232         struct drm_device *dev = crtc->base.dev;
9233         struct drm_i915_private *dev_priv = to_i915(dev);
9234         uint32_t tmp;
9235
9236         tmp = I915_READ(PF_CTL(crtc->pipe));
9237
9238         if (tmp & PF_ENABLE) {
9239                 pipe_config->pch_pfit.enabled = true;
9240                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9241                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9242
9243                 /* We currently do not free assignements of panel fitters on
9244                  * ivb/hsw (since we don't use the higher upscaling modes which
9245                  * differentiates them) so just WARN about this case for now. */
9246                 if (IS_GEN7(dev)) {
9247                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9248                                 PF_PIPE_SEL_IVB(crtc->pipe));
9249                 }
9250         }
9251 }
9252
/* Read back the state of the primary plane on ILK-era hardware and
 * reconstruct an intel_framebuffer describing the firmware/BIOS
 * scanout buffer; leaves plane_config untouched if the plane is off
 * or allocation fails. */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* The tiling bit in DSPCNTR is only meaningful on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Surface base is 4K aligned; the offset register used depends
	 * on platform and tiling mode. */
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC encodes (width - 1) << 16 | (height - 1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9321
/* Read back the full pipe configuration from the hardware into
 * pipe_config. Returns true if the pipe is enabled and the state was
 * read; false if the pipe (or its power domain) is off. The pipe's
 * power domain is held for the duration of the readout. */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On this hardware pipe and CPU transcoder map 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	/* A running PCH transcoder implies a PCH encoder: read back the
	 * FDI config and figure out which shared PLL drives it. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* On CPT the PLL selection is programmable. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	/* Always release the power reference taken above. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9418
/*
 * Sanity-check that the display hardware is fully idle before LCPLL is
 * disabled: no active pipes, power well off, all PLLs, panel power,
 * backlight PWMs, the utility pin and PCH GTC disabled, and IRQs off.
 * Each check only warns via I915_STATE_WARN; nothing is corrected here.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	/* No pipe may still be active while the PLL goes down. */
	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* The second CPU backlight PWM register only exists on Haswell. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9452
9453 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9454 {
9455         struct drm_device *dev = &dev_priv->drm;
9456
9457         if (IS_HASWELL(dev))
9458                 return I915_READ(D_COMP_HSW);
9459         else
9460                 return I915_READ(D_COMP_BDW);
9461 }
9462
9463 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9464 {
9465         struct drm_device *dev = &dev_priv->drm;
9466
9467         if (IS_HASWELL(dev)) {
9468                 mutex_lock(&dev_priv->rps.hw_lock);
9469                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9470                                             val))
9471                         DRM_ERROR("Failed to write to D_COMP\n");
9472                 mutex_unlock(&dev_priv->rps.hw_lock);
9473         } else {
9474                 I915_WRITE(D_COMP_BDW, val);
9475                 POSTING_READ(D_COMP_BDW);
9476         }
9477 }
9478
9479 /*
9480  * This function implements pieces of two sequences from BSpec:
9481  * - Sequence for display software to disable LCPLL
9482  * - Sequence for display software to allow package C8+
9483  * The steps implemented here are just the steps that actually touch the LCPLL
9484  * register. Callers should take care of disabling all the display engine
9485  * functions, doing the mode unset, fixing interrupts, etc.
9486  */
9487 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9488                               bool switch_to_fclk, bool allow_power_down)
9489 {
9490         uint32_t val;
9491
9492         assert_can_disable_lcpll(dev_priv);
9493
9494         val = I915_READ(LCPLL_CTL);
9495
9496         if (switch_to_fclk) {
9497                 val |= LCPLL_CD_SOURCE_FCLK;
9498                 I915_WRITE(LCPLL_CTL, val);
9499
9500                 if (wait_for_us(I915_READ(LCPLL_CTL) &
9501                                 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9502                         DRM_ERROR("Switching to FCLK failed\n");
9503
9504                 val = I915_READ(LCPLL_CTL);
9505         }
9506
9507         val |= LCPLL_PLL_DISABLE;
9508         I915_WRITE(LCPLL_CTL, val);
9509         POSTING_READ(LCPLL_CTL);
9510
9511         if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9512                 DRM_ERROR("LCPLL still locked\n");
9513
9514         val = hsw_read_dcomp(dev_priv);
9515         val |= D_COMP_COMP_DISABLE;
9516         hsw_write_dcomp(dev_priv, val);
9517         ndelay(100);
9518
9519         if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9520                      1))
9521                 DRM_ERROR("D_COMP RCOMP still in progress\n");
9522
9523         if (allow_power_down) {
9524                 val = I915_READ(LCPLL_CTL);
9525                 val |= LCPLL_POWER_DOWN_ALLOW;
9526                 I915_WRITE(LCPLL_CTL, val);
9527                 POSTING_READ(LCPLL_CTL);
9528         }
9529 }
9530
9531 /*
9532  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9533  * source.
9534  */
9535 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9536 {
9537         uint32_t val;
9538
9539         val = I915_READ(LCPLL_CTL);
9540
9541         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9542                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9543                 return;
9544
9545         /*
9546          * Make sure we're not on PC8 state before disabling PC8, otherwise
9547          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9548          */
9549         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9550
9551         if (val & LCPLL_POWER_DOWN_ALLOW) {
9552                 val &= ~LCPLL_POWER_DOWN_ALLOW;
9553                 I915_WRITE(LCPLL_CTL, val);
9554                 POSTING_READ(LCPLL_CTL);
9555         }
9556
9557         val = hsw_read_dcomp(dev_priv);
9558         val |= D_COMP_COMP_FORCE;
9559         val &= ~D_COMP_COMP_DISABLE;
9560         hsw_write_dcomp(dev_priv, val);
9561
9562         val = I915_READ(LCPLL_CTL);
9563         val &= ~LCPLL_PLL_DISABLE;
9564         I915_WRITE(LCPLL_CTL, val);
9565
9566         if (intel_wait_for_register(dev_priv,
9567                                     LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9568                                     5))
9569                 DRM_ERROR("LCPLL not locked yet\n");
9570
9571         if (val & LCPLL_CD_SOURCE_FCLK) {
9572                 val = I915_READ(LCPLL_CTL);
9573                 val &= ~LCPLL_CD_SOURCE_FCLK;
9574                 I915_WRITE(LCPLL_CTL, val);
9575
9576                 if (wait_for_us((I915_READ(LCPLL_CTL) &
9577                                  LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9578                         DRM_ERROR("Switching back to LCPLL failed\n");
9579         }
9580
9581         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9582         intel_update_cdclk(&dev_priv->drm);
9583 }
9584
9585 /*
9586  * Package states C8 and deeper are really deep PC states that can only be
9587  * reached when all the devices on the system allow it, so even if the graphics
9588  * device allows PC8+, it doesn't mean the system will actually get to these
9589  * states. Our driver only allows PC8+ when going into runtime PM.
9590  *
9591  * The requirements for PC8+ are that all the outputs are disabled, the power
9592  * well is disabled and most interrupts are disabled, and these are also
9593  * requirements for runtime PM. When these conditions are met, we manually do
9594  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9595  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9596  * hang the machine.
9597  *
9598  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9599  * the state of some registers, so when we come back from PC8+ we need to
9600  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9601  * need to take care of the registers kept by RC6. Notice that this happens even
9602  * if we don't put the device in PCI D3 state (which is what currently happens
9603  * because of the runtime PM support).
9604  *
9605  * For more, read "Display Sequences for Package C8" on the hardware
9606  * documentation.
9607  */
9608 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9609 {
9610         struct drm_device *dev = &dev_priv->drm;
9611         uint32_t val;
9612
9613         DRM_DEBUG_KMS("Enabling package C8+\n");
9614
9615         if (HAS_PCH_LPT_LP(dev)) {
9616                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9617                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9618                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9619         }
9620
9621         lpt_disable_clkout_dp(dev);
9622         hsw_disable_lcpll(dev_priv, true, true);
9623 }
9624
/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and PCH clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	/* Re-set the LPT-LP partition-level bit cleared on PC8 entry. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9641
9642 static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9643 {
9644         struct drm_device *dev = old_state->dev;
9645         struct intel_atomic_state *old_intel_state =
9646                 to_intel_atomic_state(old_state);
9647         unsigned int req_cdclk = old_intel_state->dev_cdclk;
9648
9649         bxt_set_cdclk(to_i915(dev), req_cdclk);
9650 }
9651
9652 /* compute the max rate for new configuration */
9653 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9654 {
9655         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9656         struct drm_i915_private *dev_priv = to_i915(state->dev);
9657         struct drm_crtc *crtc;
9658         struct drm_crtc_state *cstate;
9659         struct intel_crtc_state *crtc_state;
9660         unsigned max_pixel_rate = 0, i;
9661         enum pipe pipe;
9662
9663         memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9664                sizeof(intel_state->min_pixclk));
9665
9666         for_each_crtc_in_state(state, crtc, cstate, i) {
9667                 int pixel_rate;
9668
9669                 crtc_state = to_intel_crtc_state(cstate);
9670                 if (!crtc_state->base.enable) {
9671                         intel_state->min_pixclk[i] = 0;
9672                         continue;
9673                 }
9674
9675                 pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9676
9677                 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9678                 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
9679                         pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9680
9681                 intel_state->min_pixclk[i] = pixel_rate;
9682         }
9683
9684         for_each_pipe(dev_priv, pipe)
9685                 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9686
9687         return max_pixel_rate;
9688 }
9689
/*
 * Reprogram the BDW cdclk to @cdclk (kHz): notify pcode of the upcoming
 * change, park the CD clock on FCLK, rewrite the LCPLL frequency select,
 * switch back, then report the final frequency to pcode and CDCLK_FREQ.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val, data;
	int ret;

	/* The LCPLL must be locked and fully routed before retuning cdclk. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Source the CD clock from FCLK while the divider is reprogrammed. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us(I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* 'data' is the frequency index later handed to pcode. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back onto the LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us((I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	/* Refresh the cached cdclk value and cross-check what we got. */
	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9768
/*
 * Return the lowest legal BDW cdclk frequency (kHz) that is not below the
 * given maximum pixel rate.
 */
static int broadwell_calc_cdclk(int max_pixclk)
{
	if (max_pixclk <= 337500)
		return 337500;
	if (max_pixclk <= 450000)
		return 450000;
	if (max_pixclk <= 540000)
		return 540000;
	return 675000;
}
9780
9781 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9782 {
9783         struct drm_i915_private *dev_priv = to_i915(state->dev);
9784         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9785         int max_pixclk = ilk_max_pixel_rate(state);
9786         int cdclk;
9787
9788         /*
9789          * FIXME should also account for plane ratio
9790          * once 64bpp pixel formats are supported.
9791          */
9792         cdclk = broadwell_calc_cdclk(max_pixclk);
9793
9794         if (cdclk > dev_priv->max_cdclk_freq) {
9795                 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9796                               cdclk, dev_priv->max_cdclk_freq);
9797                 return -EINVAL;
9798         }
9799
9800         intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9801         if (!intel_state->active_crtcs)
9802                 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
9803
9804         return 0;
9805 }
9806
9807 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9808 {
9809         struct drm_device *dev = old_state->dev;
9810         struct intel_atomic_state *old_intel_state =
9811                 to_intel_atomic_state(old_state);
9812         unsigned req_cdclk = old_intel_state->dev_cdclk;
9813
9814         broadwell_set_cdclk(dev, req_cdclk);
9815 }
9816
9817 static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
9818 {
9819         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9820         struct drm_i915_private *dev_priv = to_i915(state->dev);
9821         const int max_pixclk = ilk_max_pixel_rate(state);
9822         int vco = intel_state->cdclk_pll_vco;
9823         int cdclk;
9824
9825         /*
9826          * FIXME should also account for plane ratio
9827          * once 64bpp pixel formats are supported.
9828          */
9829         cdclk = skl_calc_cdclk(max_pixclk, vco);
9830
9831         /*
9832          * FIXME move the cdclk caclulation to
9833          * compute_config() so we can fail gracegully.
9834          */
9835         if (cdclk > dev_priv->max_cdclk_freq) {
9836                 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9837                           cdclk, dev_priv->max_cdclk_freq);
9838                 cdclk = dev_priv->max_cdclk_freq;
9839         }
9840
9841         intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9842         if (!intel_state->active_crtcs)
9843                 intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
9844
9845         return 0;
9846 }
9847
9848 static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9849 {
9850         struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9851         struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9852         unsigned int req_cdclk = intel_state->dev_cdclk;
9853         unsigned int req_vco = intel_state->cdclk_pll_vco;
9854
9855         skl_set_cdclk(dev_priv, req_cdclk, req_vco);
9856 }
9857
9858 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9859                                       struct intel_crtc_state *crtc_state)
9860 {
9861         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
9862                 if (!intel_ddi_pll_select(crtc, crtc_state))
9863                         return -EINVAL;
9864         }
9865
9866         crtc->lowfreq_avail = false;
9867
9868         return 0;
9869 }
9870
9871 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9872                                 enum port port,
9873                                 struct intel_crtc_state *pipe_config)
9874 {
9875         enum intel_dpll_id id;
9876
9877         switch (port) {
9878         case PORT_A:
9879                 pipe_config->ddi_pll_sel = SKL_DPLL0;
9880                 id = DPLL_ID_SKL_DPLL0;
9881                 break;
9882         case PORT_B:
9883                 pipe_config->ddi_pll_sel = SKL_DPLL1;
9884                 id = DPLL_ID_SKL_DPLL1;
9885                 break;
9886         case PORT_C:
9887                 pipe_config->ddi_pll_sel = SKL_DPLL2;
9888                 id = DPLL_ID_SKL_DPLL2;
9889                 break;
9890         default:
9891                 DRM_ERROR("Incorrect port type\n");
9892                 return;
9893         }
9894
9895         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9896 }
9897
9898 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9899                                 enum port port,
9900                                 struct intel_crtc_state *pipe_config)
9901 {
9902         enum intel_dpll_id id;
9903         u32 temp;
9904
9905         temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9906         pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9907
9908         switch (pipe_config->ddi_pll_sel) {
9909         case SKL_DPLL0:
9910                 id = DPLL_ID_SKL_DPLL0;
9911                 break;
9912         case SKL_DPLL1:
9913                 id = DPLL_ID_SKL_DPLL1;
9914                 break;
9915         case SKL_DPLL2:
9916                 id = DPLL_ID_SKL_DPLL2;
9917                 break;
9918         case SKL_DPLL3:
9919                 id = DPLL_ID_SKL_DPLL3;
9920                 break;
9921         default:
9922                 MISSING_CASE(pipe_config->ddi_pll_sel);
9923                 return;
9924         }
9925
9926         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9927 }
9928
9929 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9930                                 enum port port,
9931                                 struct intel_crtc_state *pipe_config)
9932 {
9933         enum intel_dpll_id id;
9934
9935         pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9936
9937         switch (pipe_config->ddi_pll_sel) {
9938         case PORT_CLK_SEL_WRPLL1:
9939                 id = DPLL_ID_WRPLL1;
9940                 break;
9941         case PORT_CLK_SEL_WRPLL2:
9942                 id = DPLL_ID_WRPLL2;
9943                 break;
9944         case PORT_CLK_SEL_SPLL:
9945                 id = DPLL_ID_SPLL;
9946                 break;
9947         case PORT_CLK_SEL_LCPLL_810:
9948                 id = DPLL_ID_LCPLL_810;
9949                 break;
9950         case PORT_CLK_SEL_LCPLL_1350:
9951                 id = DPLL_ID_LCPLL_1350;
9952                 break;
9953         case PORT_CLK_SEL_LCPLL_2700:
9954                 id = DPLL_ID_LCPLL_2700;
9955                 break;
9956         default:
9957                 MISSING_CASE(pipe_config->ddi_pll_sel);
9958                 /* fall through */
9959         case PORT_CLK_SEL_NONE:
9960                 return;
9961         }
9962
9963         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9964 }
9965
/*
 * Figure out which CPU transcoder feeds @crtc's pipe and whether that
 * transcoder is enabled.  The power domain reference taken for the
 * transcoder is recorded in *power_domain_mask for the caller to release.
 * Returns false (without taking a reference) when the transcoder's power
 * domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			/* Unknown input: warn, then treat it as pipe A. */
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		/* The eDP transcoder overrides the fixed mapping for its pipe. */
		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10016
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders (ports
 * A/C).  On a match, pipe_config->cpu_transcoder is set to the DSI
 * transcoder and true is returned.  Power domain references taken while
 * probing are recorded in *power_domain_mask for the caller to release.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Port A feeds DSI transcoder A, port C transcoder C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* This port must also be routed to our pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10064
/*
 * Read out which DDI port (and therefore which shared DPLL) drives this
 * transcoder, plus the FDI configuration when the pipe is routed to the
 * PCH via DDI E.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* The port->PLL lookup is platform specific. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is connected to DDI E.
	 * So just check whether this pipe is wired to DDI E and whether the
	 * PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10107
10108 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10109                                     struct intel_crtc_state *pipe_config)
10110 {
10111         struct drm_device *dev = crtc->base.dev;
10112         struct drm_i915_private *dev_priv = to_i915(dev);
10113         enum intel_display_power_domain power_domain;
10114         unsigned long power_domain_mask;
10115         bool active;
10116
10117         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10118         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10119                 return false;
10120         power_domain_mask = BIT(power_domain);
10121
10122         pipe_config->shared_dpll = NULL;
10123
10124         active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10125
10126         if (IS_BROXTON(dev_priv) &&
10127             bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
10128                 WARN_ON(active);
10129                 active = true;
10130         }
10131
10132         if (!active)
10133                 goto out;
10134
10135         if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10136                 haswell_get_ddi_port_state(crtc, pipe_config);
10137                 intel_get_pipe_timings(crtc, pipe_config);
10138         }
10139
10140         intel_get_pipe_src_size(crtc, pipe_config);
10141
10142         pipe_config->gamma_mode =
10143                 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10144
10145         if (INTEL_INFO(dev)->gen >= 9) {
10146                 skl_init_scalers(dev, crtc, pipe_config);
10147         }
10148
10149         if (INTEL_INFO(dev)->gen >= 9) {
10150                 pipe_config->scaler_state.scaler_id = -1;
10151                 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10152         }
10153
10154         power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10155         if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10156                 power_domain_mask |= BIT(power_domain);
10157                 if (INTEL_INFO(dev)->gen >= 9)
10158                         skylake_get_pfit_config(crtc, pipe_config);
10159                 else
10160                         ironlake_get_pfit_config(crtc, pipe_config);
10161         }
10162
10163         if (IS_HASWELL(dev))
10164                 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10165                         (I915_READ(IPS_CTL) & IPS_ENABLE);
10166
10167         if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10168             !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10169                 pipe_config->pixel_multiplier =
10170                         I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10171         } else {
10172                 pipe_config->pixel_multiplier = 1;
10173         }
10174
10175 out:
10176         for_each_power_domain(power_domain, power_domain_mask)
10177                 intel_display_power_put(dev_priv, power_domain);
10178
10179         return active;
10180 }
10181
/*
 * Program the cursor plane on 845G/865G.
 *
 * @crtc: CRTC owning the cursor (these chips only have the pipe A
 *	cursor registers)
 * @base: address of the cursor image; ignored while disabled
 * @plane_state: new cursor plane state, or NULL when disabling
 *
 * On these chipsets base/size/stride may only be changed while the
 * cursor is disabled, so the cursor is switched off first whenever
 * any of them needs to change, and re-enabled at the end via the
 * control register write.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		/* ARGB = 4 bytes/pixel, stride rounded to a power of two */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		/* CURSIZE packs height in bits 12+, width in the low bits */
		size = (height << 12) | width;
	}

	/* Disable the cursor first if base/size/cntl need to change */
	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	/* Finally write the new control value, (re)enabling the cursor */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
10244
10245 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10246                                const struct intel_plane_state *plane_state)
10247 {
10248         struct drm_device *dev = crtc->dev;
10249         struct drm_i915_private *dev_priv = to_i915(dev);
10250         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10251         int pipe = intel_crtc->pipe;
10252         uint32_t cntl = 0;
10253
10254         if (plane_state && plane_state->visible) {
10255                 cntl = MCURSOR_GAMMA_ENABLE;
10256                 switch (plane_state->base.crtc_w) {
10257                         case 64:
10258                                 cntl |= CURSOR_MODE_64_ARGB_AX;
10259                                 break;
10260                         case 128:
10261                                 cntl |= CURSOR_MODE_128_ARGB_AX;
10262                                 break;
10263                         case 256:
10264                                 cntl |= CURSOR_MODE_256_ARGB_AX;
10265                                 break;
10266                         default:
10267                                 MISSING_CASE(plane_state->base.crtc_w);
10268                                 return;
10269                 }
10270                 cntl |= pipe << 28; /* Connect to correct pipe */
10271
10272                 if (HAS_DDI(dev))
10273                         cntl |= CURSOR_PIPE_CSC_ENABLE;
10274
10275                 if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
10276                         cntl |= CURSOR_ROTATE_180;
10277         }
10278
10279         if (intel_crtc->cursor_cntl != cntl) {
10280                 I915_WRITE(CURCNTR(pipe), cntl);
10281                 POSTING_READ(CURCNTR(pipe));
10282                 intel_crtc->cursor_cntl = cntl;
10283         }
10284
10285         /* and commit changes on next vblank */
10286         I915_WRITE(CURBASE(pipe), base);
10287         POSTING_READ(CURBASE(pipe));
10288
10289         intel_crtc->cursor_base = base;
10290 }
10291
10292 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
10293 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10294                                      const struct intel_plane_state *plane_state)
10295 {
10296         struct drm_device *dev = crtc->dev;
10297         struct drm_i915_private *dev_priv = to_i915(dev);
10298         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10299         int pipe = intel_crtc->pipe;
10300         u32 base = intel_crtc->cursor_addr;
10301         u32 pos = 0;
10302
10303         if (plane_state) {
10304                 int x = plane_state->base.crtc_x;
10305                 int y = plane_state->base.crtc_y;
10306
10307                 if (x < 0) {
10308                         pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10309                         x = -x;
10310                 }
10311                 pos |= x << CURSOR_X_SHIFT;
10312
10313                 if (y < 0) {
10314                         pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10315                         y = -y;
10316                 }
10317                 pos |= y << CURSOR_Y_SHIFT;
10318
10319                 /* ILK+ do this automagically */
10320                 if (HAS_GMCH_DISPLAY(dev) &&
10321                     plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
10322                         base += (plane_state->base.crtc_h *
10323                                  plane_state->base.crtc_w - 1) * 4;
10324                 }
10325         }
10326
10327         I915_WRITE(CURPOS(pipe), pos);
10328
10329         if (IS_845G(dev) || IS_I865G(dev))
10330                 i845_update_cursor(crtc, base, plane_state);
10331         else
10332                 i9xx_update_cursor(crtc, base, plane_state);
10333 }
10334
/*
 * Check whether a cursor of @width x @height pixels is supported by
 * the hardware.
 *
 * Return: true if the dimensions can be programmed.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		/* width must be a multiple of 64 pixels */
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/*
		 * Since both values are nonzero powers of two here,
		 * width | height only equals a case label when
		 * width == height, which enforces squareness.
		 */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fall through */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10371
/*
 * VESA 640x480x72Hz mode to set on the pipe.
 * Used by intel_get_load_detect_pipe() when the caller does not
 * request a specific mode.
 */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10377
10378 struct drm_framebuffer *
10379 __intel_framebuffer_create(struct drm_device *dev,
10380                            struct drm_mode_fb_cmd2 *mode_cmd,
10381                            struct drm_i915_gem_object *obj)
10382 {
10383         struct intel_framebuffer *intel_fb;
10384         int ret;
10385
10386         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10387         if (!intel_fb)
10388                 return ERR_PTR(-ENOMEM);
10389
10390         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10391         if (ret)
10392                 goto err;
10393
10394         return &intel_fb->base;
10395
10396 err:
10397         kfree(intel_fb);
10398         return ERR_PTR(ret);
10399 }
10400
10401 static struct drm_framebuffer *
10402 intel_framebuffer_create(struct drm_device *dev,
10403                          struct drm_mode_fb_cmd2 *mode_cmd,
10404                          struct drm_i915_gem_object *obj)
10405 {
10406         struct drm_framebuffer *fb;
10407         int ret;
10408
10409         ret = i915_mutex_lock_interruptible(dev);
10410         if (ret)
10411                 return ERR_PTR(ret);
10412         fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10413         mutex_unlock(&dev->struct_mutex);
10414
10415         return fb;
10416 }
10417
10418 static u32
10419 intel_framebuffer_pitch_for_width(int width, int bpp)
10420 {
10421         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10422         return ALIGN(pitch, 64);
10423 }
10424
10425 static u32
10426 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10427 {
10428         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10429         return PAGE_ALIGN(pitch * mode->vdisplay);
10430 }
10431
10432 static struct drm_framebuffer *
10433 intel_framebuffer_create_for_mode(struct drm_device *dev,
10434                                   struct drm_display_mode *mode,
10435                                   int depth, int bpp)
10436 {
10437         struct drm_framebuffer *fb;
10438         struct drm_i915_gem_object *obj;
10439         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10440
10441         obj = i915_gem_object_create(dev,
10442                                     intel_framebuffer_size_for_mode(mode, bpp));
10443         if (IS_ERR(obj))
10444                 return ERR_CAST(obj);
10445
10446         mode_cmd.width = mode->hdisplay;
10447         mode_cmd.height = mode->vdisplay;
10448         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10449                                                                 bpp);
10450         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10451
10452         fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10453         if (IS_ERR(fb))
10454                 drm_gem_object_unreference_unlocked(&obj->base);
10455
10456         return fb;
10457 }
10458
10459 static struct drm_framebuffer *
10460 mode_fits_in_fbdev(struct drm_device *dev,
10461                    struct drm_display_mode *mode)
10462 {
10463 #ifdef CONFIG_DRM_FBDEV_EMULATION
10464         struct drm_i915_private *dev_priv = to_i915(dev);
10465         struct drm_i915_gem_object *obj;
10466         struct drm_framebuffer *fb;
10467
10468         if (!dev_priv->fbdev)
10469                 return NULL;
10470
10471         if (!dev_priv->fbdev->fb)
10472                 return NULL;
10473
10474         obj = dev_priv->fbdev->fb->obj;
10475         BUG_ON(!obj);
10476
10477         fb = &dev_priv->fbdev->fb->base;
10478         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10479                                                                fb->bits_per_pixel))
10480                 return NULL;
10481
10482         if (obj->base.size < mode->vdisplay * fb->pitches[0])
10483                 return NULL;
10484
10485         drm_framebuffer_reference(fb);
10486         return fb;
10487 #else
10488         return NULL;
10489 #endif
10490 }
10491
10492 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10493                                            struct drm_crtc *crtc,
10494                                            struct drm_display_mode *mode,
10495                                            struct drm_framebuffer *fb,
10496                                            int x, int y)
10497 {
10498         struct drm_plane_state *plane_state;
10499         int hdisplay, vdisplay;
10500         int ret;
10501
10502         plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10503         if (IS_ERR(plane_state))
10504                 return PTR_ERR(plane_state);
10505
10506         if (mode)
10507                 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10508         else
10509                 hdisplay = vdisplay = 0;
10510
10511         ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10512         if (ret)
10513                 return ret;
10514         drm_atomic_set_fb_for_plane(plane_state, fb);
10515         plane_state->crtc_x = 0;
10516         plane_state->crtc_y = 0;
10517         plane_state->crtc_w = hdisplay;
10518         plane_state->crtc_h = vdisplay;
10519         plane_state->src_x = x << 16;
10520         plane_state->src_y = y << 16;
10521         plane_state->src_w = hdisplay << 16;
10522         plane_state->src_h = vdisplay << 16;
10523
10524         return 0;
10525 }
10526
/*
 * intel_get_load_detect_pipe - light up @connector for load detection
 * @connector: connector to probe
 * @mode: mode to set, or NULL to use the default load_detect_mode
 * @old: cookie recording how to undo the change; hand it to
 *	intel_release_load_detect_pipe() afterwards
 * @ctx: modeset acquire context used for all locking here
 *
 * Finds a CRTC for the connector (its current one, or the first
 * unused one its encoder can drive), builds and commits an atomic
 * state enabling it with a suitable framebuffer, and saves a second
 * atomic state in @old->restore_state for later restoration.
 *
 * Returns true if a pipe is now driving the connector, false if no
 * pipe was available or the commit failed. On -EDEADLK all state and
 * locks are dropped and the whole sequence retries.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use: drop its lock and keep looking */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	/* One state to apply now, one to capture what we must restore */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state now holds its own reference on fb */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restore */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	/* drm_atomic_state_free() handles NULL, so both calls are safe */
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
10706
10707 void intel_release_load_detect_pipe(struct drm_connector *connector,
10708                                     struct intel_load_detect_pipe *old,
10709                                     struct drm_modeset_acquire_ctx *ctx)
10710 {
10711         struct intel_encoder *intel_encoder =
10712                 intel_attached_encoder(connector);
10713         struct drm_encoder *encoder = &intel_encoder->base;
10714         struct drm_atomic_state *state = old->restore_state;
10715         int ret;
10716
10717         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10718                       connector->base.id, connector->name,
10719                       encoder->base.id, encoder->name);
10720
10721         if (!state)
10722                 return;
10723
10724         ret = drm_atomic_commit(state);
10725         if (ret) {
10726                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10727                 drm_atomic_state_free(state);
10728         }
10729 }
10730
10731 static int i9xx_pll_refclk(struct drm_device *dev,
10732                            const struct intel_crtc_state *pipe_config)
10733 {
10734         struct drm_i915_private *dev_priv = to_i915(dev);
10735         u32 dpll = pipe_config->dpll_hw_state.dpll;
10736
10737         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10738                 return dev_priv->vbt.lvds_ssc_freq;
10739         else if (HAS_PCH_SPLIT(dev))
10740                 return 120000;
10741         else if (!IS_GEN2(dev))
10742                 return 96000;
10743         else
10744                 return 48000;
10745 }
10746
/*
 * Returns the clock of the currently programmed mode of the given
 * pipe, reconstructed from the DPLL/FP register values already read
 * into @pipe_config->dpll_hw_state.
 *
 * The result is stored in pipe_config->port_clock (see the comment at
 * the end: it still includes the pixel multiplier).
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor register the DPLL currently selects */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N divisors; Pineview encodes N differently */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* gen3+: P1 from the post divider field, P2 from DPLL mode */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: P1/P2 encoding depends on whether LVDS drives pipe B */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10836
10837 int intel_dotclock_calculate(int link_freq,
10838                              const struct intel_link_m_n *m_n)
10839 {
10840         /*
10841          * The calculation for the data clock is:
10842          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10843          * But we want to avoid losing precison if possible, so:
10844          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10845          *
10846          * and the link clock is simpler:
10847          * link_clock = (m * link_clock) / n
10848          */
10849
10850         if (!m_n->link_n)
10851                 return 0;
10852
10853         return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10854 }
10855
/*
 * Fill in port_clock and a dotclock estimate for an ILK-style PCH
 * pipe: the port clock is read back from the DPLL registers, and the
 * dotclock is derived from the FDI M/N configuration.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10873
10874 /** Returns the currently programmed mode of the given pipe. */
10875 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10876                                              struct drm_crtc *crtc)
10877 {
10878         struct drm_i915_private *dev_priv = to_i915(dev);
10879         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10880         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10881         struct drm_display_mode *mode;
10882         struct intel_crtc_state *pipe_config;
10883         int htot = I915_READ(HTOTAL(cpu_transcoder));
10884         int hsync = I915_READ(HSYNC(cpu_transcoder));
10885         int vtot = I915_READ(VTOTAL(cpu_transcoder));
10886         int vsync = I915_READ(VSYNC(cpu_transcoder));
10887         enum pipe pipe = intel_crtc->pipe;
10888
10889         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10890         if (!mode)
10891                 return NULL;
10892
10893         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10894         if (!pipe_config) {
10895                 kfree(mode);
10896                 return NULL;
10897         }
10898
10899         /*
10900          * Construct a pipe_config sufficient for getting the clock info
10901          * back out of crtc_clock_get.
10902          *
10903          * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10904          * to use a real value here instead.
10905          */
10906         pipe_config->cpu_transcoder = (enum transcoder) pipe;
10907         pipe_config->pixel_multiplier = 1;
10908         pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10909         pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10910         pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10911         i9xx_crtc_clock_get(intel_crtc, pipe_config);
10912
10913         mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10914         mode->hdisplay = (htot & 0xffff) + 1;
10915         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10916         mode->hsync_start = (hsync & 0xffff) + 1;
10917         mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10918         mode->vdisplay = (vtot & 0xffff) + 1;
10919         mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10920         mode->vsync_start = (vsync & 0xffff) + 1;
10921         mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10922
10923         drm_mode_set_name(mode);
10924
10925         kfree(pipe_config);
10926
10927         return mode;
10928 }
10929
10930 static void intel_crtc_destroy(struct drm_crtc *crtc)
10931 {
10932         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10933         struct drm_device *dev = crtc->dev;
10934         struct intel_flip_work *work;
10935
10936         spin_lock_irq(&dev->event_lock);
10937         work = intel_crtc->flip_work;
10938         intel_crtc->flip_work = NULL;
10939         spin_unlock_irq(&dev->event_lock);
10940
10941         if (work) {
10942                 cancel_work_sync(&work->mmio_work);
10943                 cancel_work_sync(&work->unpin_work);
10944                 kfree(work);
10945         }
10946
10947         drm_crtc_cleanup(crtc);
10948
10949         kfree(intel_crtc);
10950 }
10951
/*
 * Workqueue completion of a page flip: unpin the old framebuffer,
 * drop the request/object references, and signal frontbuffer flip
 * completion.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* For mmio flips, wait for the mmio work to finish first */
	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	/* struct_mutex protects the unpin and the GEM references */
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	/* This work must have been counted when it was queued */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
10980
10981 /* Is 'a' after or equal to 'b'? */
10982 static bool g4x_flip_count_after_eq(u32 a, u32 b)
10983 {
10984         return !((a - b) & 0x80000000);
10985 }
10986
/*
 * Has a CS (ring-emitted) page flip completed on the hardware?
 *
 * Treats a GPU reset since the flip was queued as completion, and on
 * hardware with the relevant registers combines a surface-address check
 * with a flip-count check (see the comments below for why either alone
 * is insufficient).
 */
static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned reset_counter;

	/* A reset since queuing means the flip will never signal; call it done. */
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (crtc->reset_counter != reset_counter)
		return true;

	/*
	 * The relevant registers doesn't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->flip_work->flip_count);
}
11034
11035 static bool
11036 __pageflip_finished_mmio(struct intel_crtc *crtc,
11037                                struct intel_flip_work *work)
11038 {
11039         /*
11040          * MMIO work completes when vblank is different from
11041          * flip_queued_vblank.
11042          *
11043          * Reset counter value doesn't matter, this is handled by
11044          * i915_wait_request finishing early, so no need to handle
11045          * reset here.
11046          */
11047         return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
11048 }
11049
11050
11051 static bool pageflip_finished(struct intel_crtc *crtc,
11052                               struct intel_flip_work *work)
11053 {
11054         if (!atomic_read(&work->pending))
11055                 return false;
11056
11057         smp_rmb();
11058
11059         if (is_mmio_work(work))
11060                 return __pageflip_finished_mmio(crtc, work);
11061         else
11062                 return __pageflip_finished_cs(crtc, work);
11063 }
11064
/*
 * Complete a pending CS page flip on the given pipe, if its hardware
 * completion condition is now met. Called from the flip-done interrupt
 * and from the reset code.
 */
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->flip_work;

	/* Only CS flips are completed here; mmio flips have their own path. */
	if (work != NULL &&
	    !is_mmio_work(work) &&
	    pageflip_finished(intel_crtc, work))
		page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
11091
/*
 * Complete a pending mmio page flip on the given pipe, if its completion
 * condition (a vblank since queuing) is now met. Counterpart of
 * intel_finish_page_flip_cs() for mmio flips.
 */
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->flip_work;

	/* Only mmio flips are completed here; CS flips have their own path. */
	if (work != NULL &&
	    is_mmio_work(work) &&
	    pageflip_finished(intel_crtc, work))
		page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
11118
/*
 * Arm the flip work: record the vblank counter at queue time and mark it
 * pending so the completion checks start considering it.
 */
static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ...
	 * (pairs with the smp_rmb() in pageflip_finished()) */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}
11128
/*
 * Emit a gen2 CS page flip into the ring: wait for any previous flip on
 * this plane to complete, then issue MI_DISPLAY_FLIP with the new base
 * address.
 *
 * Returns 0 on success or a negative error code if ring space could not
 * be reserved.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, 0); /* aux display base address, unused */

	return 0;
}
11162
/*
 * Emit a gen3 CS page flip. Same shape as gen2 but uses the i915 flavour
 * of MI_DISPLAY_FLIP and pads the packet with MI_NOOP instead of an aux
 * base address dword.
 *
 * Returns 0 on success or a negative error code if ring space could not
 * be reserved.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Wait for any previous flip on this plane before queueing the next. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, MI_NOOP);

	return 0;
}
11193
/*
 * Emit a gen4 (i965+) CS page flip. No wait-for-event is needed here;
 * only the base address (with the tiling bit folded in) is reprogrammed.
 *
 * Returns 0 on success or a negative error code if ring space could not
 * be reserved.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(engine, pf | pipesrc);

	return 0;
}
11231
/*
 * Emit a gen6 CS page flip. Differs from gen4 in that the tiling bit is
 * folded into the pitch dword rather than the base address.
 *
 * Returns 0 on success or a negative error code if ring space could not
 * be reserved.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(engine, pf | pipesrc);

	return 0;
}
11266
/*
 * Emit a gen7+ (IVB-style) CS page flip. On the render ring this also
 * emits the DERRMR unmask + save/restore workaround sequence required
 * for flip-done events, and on gen8 accounts for the wider SRM command.
 * The whole packet must stay within one cacheline (see below).
 *
 * Returns 0 on success, -ENODEV for an unknown plane, or a negative
 * error code if ring space could not be reserved.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* Base packet is 4 dwords; RCS needs the DERRMR LRI+SRM on top. */
	len = 4;
	if (engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (engine->id == RCS) {
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					  DERRMR_PIPEB_PRI_FLIP_DONE |
					  DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(engine, 0);
			intel_ring_emit(engine, MI_NOOP);
		}
	}

	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, (MI_NOOP));

	return 0;
}
11360
11361 static bool use_mmio_flip(struct intel_engine_cs *engine,
11362                           struct drm_i915_gem_object *obj)
11363 {
11364         struct reservation_object *resv;
11365
11366         /*
11367          * This is not being used for older platforms, because
11368          * non-availability of flip done interrupt forces us to use
11369          * CS flips. Older platforms derive flip done using some clever
11370          * tricks involving the flip_pending status bits and vblank irqs.
11371          * So using MMIO flips there would disrupt this mechanism.
11372          */
11373
11374         if (engine == NULL)
11375                 return true;
11376
11377         if (INTEL_GEN(engine->i915) < 5)
11378                 return false;
11379
11380         if (i915.use_mmio_flip < 0)
11381                 return false;
11382         else if (i915.use_mmio_flip > 0)
11383                 return true;
11384         else if (i915.enable_execlists)
11385                 return true;
11386
11387         resv = i915_gem_object_get_dmabuf_resv(obj);
11388         if (resv && !reservation_object_test_signaled_rcu(resv, false))
11389                 return true;
11390
11391         return engine != i915_gem_request_get_engine(obj->last_write_req);
11392 }
11393
/*
 * Perform an mmio page flip on SKL+ by reprogramming the universal
 * plane registers. PLANE_CTL/PLANE_STRIDE latch on the PLANE_SURF
 * write, so the final surface write makes the whole update atomic.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	/* Refresh the tiling bits in PLANE_CTL from the new fb's modifier. */
	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						  fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
11446
/*
 * Perform an mmio page flip on ILK-era hardware by updating DSPCNTR's
 * tiling bit for the new framebuffer and writing the new surface
 * address to DSPSURF, which arms the flip.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	/* Keep the plane's tiling setting in sync with the new fb's object. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	/* The DSPSURF write latches everything and triggers the flip. */
	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}
11470
/*
 * Worker for mmio page flips: wait for the framebuffer's rendering (and
 * any dmabuf fence) to finish, then program the flip registers inside a
 * vblank-evasion critical section.
 */
static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct reservation_object *resv;

	/* Wait for any GPU rendering to the new framebuffer to complete. */
	if (work->flip_queued_req)
		WARN_ON(__i915_wait_request(work->flip_queued_req,
					    false, NULL,
					    &dev_priv->rps.mmioflips));

	/* For framebuffer backed by dmabuf, wait for fence */
	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv)
		WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
							    MAX_SCHEDULE_TIMEOUT) < 0);

	intel_pipe_update_start(crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_do_mmio_flip(crtc, work->rotation, work);
	else
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc, work);
}
11503
/*
 * Fallback queue_flip hook for platforms without CS flip support:
 * always fails, forcing callers down the non-flip path.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
11513
/*
 * Heuristic stall detection for CS flips: once the flip's request has
 * completed and ~3 vblanks have passed without a flip-done interrupt,
 * check whether the display surface address already matches the flip —
 * if so, assume we missed the interrupt.
 *
 * Must be called with the event lock held (reads work state that the
 * completion paths modify).
 */
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc,
				      struct intel_flip_work *work)
{
	u32 addr, vblank;

	if (!atomic_read(&work->pending))
		return false;

	/* Pairs with the barrier in intel_mark_page_flip_active(). */
	smp_rmb();

	vblank = intel_crtc_get_vblank_counter(intel_crtc);
	if (work->flip_ready_vblank == 0) {
		/* The flip can't be pending in hw until its request retires. */
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req))
			return false;

		work->flip_ready_vblank = vblank;
	}

	if (vblank - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
11550
/*
 * Per-vblank watchdog for CS flips: kicks (completes) a flip whose
 * interrupt appears to have been lost, and RPS-boosts the request of a
 * flip that is taking more than a vblank to land. Runs from the vblank
 * interrupt handler.
 */
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = intel_crtc->flip_work;

	/* Stuck flip: complete it ourselves since the irq never arrived. */
	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
		page_flip_completed(intel_crtc);
		work = NULL;
	}

	/* Flip older than one vblank: boost the GPU to finish its request. */
	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
11580
11581 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11582                                 struct drm_framebuffer *fb,
11583                                 struct drm_pending_vblank_event *event,
11584                                 uint32_t page_flip_flags)
11585 {
11586         struct drm_device *dev = crtc->dev;
11587         struct drm_i915_private *dev_priv = to_i915(dev);
11588         struct drm_framebuffer *old_fb = crtc->primary->fb;
11589         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11590         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11591         struct drm_plane *primary = crtc->primary;
11592         enum pipe pipe = intel_crtc->pipe;
11593         struct intel_flip_work *work;
11594         struct intel_engine_cs *engine;
11595         bool mmio_flip;
11596         struct drm_i915_gem_request *request = NULL;
11597         int ret;
11598
11599         /*
11600          * drm_mode_page_flip_ioctl() should already catch this, but double
11601          * check to be safe.  In the future we may enable pageflipping from
11602          * a disabled primary plane.
11603          */
11604         if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11605                 return -EBUSY;
11606
11607         /* Can't change pixel format via MI display flips. */
11608         if (fb->pixel_format != crtc->primary->fb->pixel_format)
11609                 return -EINVAL;
11610
11611         /*
11612          * TILEOFF/LINOFF registers can't be changed via MI display flips.
11613          * Note that pitch changes could also affect these register.
11614          */
11615         if (INTEL_INFO(dev)->gen > 3 &&
11616             (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11617              fb->pitches[0] != crtc->primary->fb->pitches[0]))
11618                 return -EINVAL;
11619
11620         if (i915_terminally_wedged(&dev_priv->gpu_error))
11621                 goto out_hang;
11622
11623         work = kzalloc(sizeof(*work), GFP_KERNEL);
11624         if (work == NULL)
11625                 return -ENOMEM;
11626
11627         work->event = event;
11628         work->crtc = crtc;
11629         work->old_fb = old_fb;
11630         INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11631
11632         ret = drm_crtc_vblank_get(crtc);
11633         if (ret)
11634                 goto free_work;
11635
11636         /* We borrow the event spin lock for protecting flip_work */
11637         spin_lock_irq(&dev->event_lock);
11638         if (intel_crtc->flip_work) {
11639                 /* Before declaring the flip queue wedged, check if
11640                  * the hardware completed the operation behind our backs.
11641                  */
11642                 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
11643                         DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11644                         page_flip_completed(intel_crtc);
11645                 } else {
11646                         DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11647                         spin_unlock_irq(&dev->event_lock);
11648
11649                         drm_crtc_vblank_put(crtc);
11650                         kfree(work);
11651                         return -EBUSY;
11652                 }
11653         }
11654         intel_crtc->flip_work = work;
11655         spin_unlock_irq(&dev->event_lock);
11656
11657         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11658                 flush_workqueue(dev_priv->wq);
11659
11660         /* Reference the objects for the scheduled work. */
11661         drm_framebuffer_reference(work->old_fb);
11662         drm_gem_object_reference(&obj->base);
11663
11664         crtc->primary->fb = fb;
11665         update_state_fb(crtc->primary);
11666
11667         intel_fbc_pre_update(intel_crtc, intel_crtc->config,
11668                              to_intel_plane_state(primary->state));
11669
11670         work->pending_flip_obj = obj;
11671
11672         ret = i915_mutex_lock_interruptible(dev);
11673         if (ret)
11674                 goto cleanup;
11675
11676         intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11677         if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11678                 ret = -EIO;
11679                 goto cleanup;
11680         }
11681
11682         atomic_inc(&intel_crtc->unpin_work_count);
11683
11684         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11685                 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11686
11687         if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11688                 engine = &dev_priv->engine[BCS];
11689                 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11690                         /* vlv: DISPLAY_FLIP fails to change tiling */
11691                         engine = NULL;
11692         } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11693                 engine = &dev_priv->engine[BCS];
11694         } else if (INTEL_INFO(dev)->gen >= 7) {
11695                 engine = i915_gem_request_get_engine(obj->last_write_req);
11696                 if (engine == NULL || engine->id != RCS)
11697                         engine = &dev_priv->engine[BCS];
11698         } else {
11699                 engine = &dev_priv->engine[RCS];
11700         }
11701
11702         mmio_flip = use_mmio_flip(engine, obj);
11703
11704         /* When using CS flips, we want to emit semaphores between rings.
11705          * However, when using mmio flips we will create a task to do the
11706          * synchronisation, so all we want here is to pin the framebuffer
11707          * into the display plane and skip any waits.
11708          */
11709         if (!mmio_flip) {
11710                 ret = i915_gem_object_sync(obj, engine, &request);
11711                 if (!ret && !request) {
11712                         request = i915_gem_request_alloc(engine, NULL);
11713                         ret = PTR_ERR_OR_ZERO(request);
11714                 }
11715
11716                 if (ret)
11717                         goto cleanup_pending;
11718         }
11719
11720         ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
11721         if (ret)
11722                 goto cleanup_pending;
11723
11724         work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11725                                                   obj, 0);
11726         work->gtt_offset += intel_crtc->dspaddr_offset;
11727         work->rotation = crtc->primary->state->rotation;
11728
11729         if (mmio_flip) {
11730                 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11731
11732                 i915_gem_request_assign(&work->flip_queued_req,
11733                                         obj->last_write_req);
11734
11735                 schedule_work(&work->mmio_work);
11736         } else {
11737                 i915_gem_request_assign(&work->flip_queued_req, request);
11738                 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11739                                                    page_flip_flags);
11740                 if (ret)
11741                         goto cleanup_unpin;
11742
11743                 intel_mark_page_flip_active(intel_crtc, work);
11744
11745                 i915_add_request_no_flush(request);
11746         }
11747
11748         i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11749                           to_intel_plane(primary)->frontbuffer_bit);
11750         mutex_unlock(&dev->struct_mutex);
11751
11752         intel_frontbuffer_flip_prepare(dev,
11753                                        to_intel_plane(primary)->frontbuffer_bit);
11754
11755         trace_i915_flip_request(intel_crtc->plane, obj);
11756
11757         return 0;
11758
11759 cleanup_unpin:
11760         intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
11761 cleanup_pending:
11762         if (!IS_ERR_OR_NULL(request))
11763                 i915_add_request_no_flush(request);
11764         atomic_dec(&intel_crtc->unpin_work_count);
11765         mutex_unlock(&dev->struct_mutex);
11766 cleanup:
11767         crtc->primary->fb = old_fb;
11768         update_state_fb(crtc->primary);
11769
11770         drm_gem_object_unreference_unlocked(&obj->base);
11771         drm_framebuffer_unreference(work->old_fb);
11772
11773         spin_lock_irq(&dev->event_lock);
11774         intel_crtc->flip_work = NULL;
11775         spin_unlock_irq(&dev->event_lock);
11776
11777         drm_crtc_vblank_put(crtc);
11778 free_work:
11779         kfree(work);
11780
11781         if (ret == -EIO) {
11782                 struct drm_atomic_state *state;
11783                 struct drm_plane_state *plane_state;
11784
11785 out_hang:
11786                 state = drm_atomic_state_alloc(dev);
11787                 if (!state)
11788                         return -ENOMEM;
11789                 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11790
11791 retry:
11792                 plane_state = drm_atomic_get_plane_state(state, primary);
11793                 ret = PTR_ERR_OR_ZERO(plane_state);
11794                 if (!ret) {
11795                         drm_atomic_set_fb_for_plane(plane_state, fb);
11796
11797                         ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11798                         if (!ret)
11799                                 ret = drm_atomic_commit(state);
11800                 }
11801
11802                 if (ret == -EDEADLK) {
11803                         drm_modeset_backoff(state->acquire_ctx);
11804                         drm_atomic_state_clear(state);
11805                         goto retry;
11806                 }
11807
11808                 if (ret)
11809                         drm_atomic_state_free(state);
11810
11811                 if (ret == 0 && event) {
11812                         spin_lock_irq(&dev->event_lock);
11813                         drm_crtc_send_vblank_event(crtc, event);
11814                         spin_unlock_irq(&dev->event_lock);
11815                 }
11816         }
11817         return ret;
11818 }
11819
11820
11821 /**
11822  * intel_wm_need_update - Check whether watermarks need updating
11823  * @plane: drm plane
11824  * @state: new plane state
11825  *
11826  * Check current plane state versus the new one to determine whether
11827  * watermarks need to be recalculated.
11828  *
11829  * Returns true or false.
11830  */
11831 static bool intel_wm_need_update(struct drm_plane *plane,
11832                                  struct drm_plane_state *state)
11833 {
11834         struct intel_plane_state *new = to_intel_plane_state(state);
11835         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11836
11837         /* Update watermarks on tiling or size changes. */
11838         if (new->visible != cur->visible)
11839                 return true;
11840
11841         if (!cur->base.fb || !new->base.fb)
11842                 return false;
11843
11844         if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11845             cur->base.rotation != new->base.rotation ||
11846             drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11847             drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11848             drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11849             drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11850                 return true;
11851
11852         return false;
11853 }
11854
11855 static bool needs_scaling(struct intel_plane_state *state)
11856 {
11857         int src_w = drm_rect_width(&state->src) >> 16;
11858         int src_h = drm_rect_height(&state->src) >> 16;
11859         int dst_w = drm_rect_width(&state->dst);
11860         int dst_h = drm_rect_height(&state->dst);
11861
11862         return (src_w != dst_w || src_h != dst_h);
11863 }
11864
/*
 * intel_plane_atomic_calc_changes - derive crtc-level flags for a plane update
 * @crtc_state: new crtc state the plane belongs to
 * @plane_state: new plane state
 *
 * Computes the plane's visibility transition (turning on/off) and sets the
 * resulting flags in the intel crtc state: pre/post watermark updates,
 * cxsr disabling, fb_changed, frontbuffer bits and the IVB sprite-scaling
 * LP watermark workaround.
 *
 * Returns 0 on success or a negative error code from the scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* Gen9+: set up the plane scaler first (cursors take no scaler here). */
	if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	/* A plane can't have been visible on a crtc that was off. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->visible = visible = false;

	/* Invisible before and after: nothing to flag. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A modeset forces a full off->on cycle for a visible plane. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id,
			 intel_crtc->base.name,
			 plane->base.id, plane->name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, plane->name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	/* Pre-gen9 platforms need two-step watermark updates */
	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;

	/* Record the frontbuffer bit of every plane visible at any point. */
	if (visible || was_visible)
		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
11969
11970 static bool encoders_cloneable(const struct intel_encoder *a,
11971                                const struct intel_encoder *b)
11972 {
11973         /* masks could be asymmetric, so check both ways */
11974         return a == b || (a->cloneable & (1 << b->type) &&
11975                           b->cloneable & (1 << a->type));
11976 }
11977
11978 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11979                                          struct intel_crtc *crtc,
11980                                          struct intel_encoder *encoder)
11981 {
11982         struct intel_encoder *source_encoder;
11983         struct drm_connector *connector;
11984         struct drm_connector_state *connector_state;
11985         int i;
11986
11987         for_each_connector_in_state(state, connector, connector_state, i) {
11988                 if (connector_state->crtc != &crtc->base)
11989                         continue;
11990
11991                 source_encoder =
11992                         to_intel_encoder(connector_state->best_encoder);
11993                 if (!encoders_cloneable(encoder, source_encoder))
11994                         return false;
11995         }
11996
11997         return true;
11998 }
11999
/*
 * intel_crtc_atomic_check - i915 ->atomic_check hook for crtcs
 * @crtc: crtc being checked
 * @crtc_state: new crtc state
 *
 * Validates @crtc_state and precomputes derived state: pipe clocks, color
 * management, pipe and intermediate watermarks, and (gen9+) scalers.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* A modeset that leaves the pipe off needs a post-commit wm update. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* Compute new pipe clocks; no shared dpll may be assigned yet. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		/* Intermediate wms make no sense without target (pipe) wms. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* skip_intermediate_wm set: reuse the optimal wms on ilk-style hw */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	/* Gen9+: refresh crtc scaler state and (re)assign hw scalers. */
	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
12072
/* CRTC helper vtable: wires i915's atomic begin/flush/check hooks into drm. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
12079
/*
 * Resync every connector's atomic state (best_encoder/crtc) with its
 * current legacy encoder pointer, fixing up connector references to match.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc again: take a fresh reference. */
			drm_connector_reference(&connector->base);
		} else {
			/* No encoder: the connector is idle in atomic state. */
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}
12101
12102 static void
12103 connected_sink_compute_bpp(struct intel_connector *connector,
12104                            struct intel_crtc_state *pipe_config)
12105 {
12106         int bpp = pipe_config->pipe_bpp;
12107
12108         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
12109                 connector->base.base.id,
12110                 connector->base.name);
12111
12112         /* Don't use an invalid EDID bpc value */
12113         if (connector->base.display_info.bpc &&
12114             connector->base.display_info.bpc * 3 < bpp) {
12115                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12116                               bpp, connector->base.display_info.bpc*3);
12117                 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12118         }
12119
12120         /* Clamp bpp to default limit on screens without EDID 1.4 */
12121         if (connector->base.display_info.bpc == 0) {
12122                 int type = connector->base.connector_type;
12123                 int clamp_bpp = 24;
12124
12125                 /* Fall back to 18 bpp when DP sink capability is unknown. */
12126                 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12127                     type == DRM_MODE_CONNECTOR_eDP)
12128                         clamp_bpp = 18;
12129
12130                 if (bpp > clamp_bpp) {
12131                         DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12132                                       bpp, clamp_bpp);
12133                         pipe_config->pipe_bpp = clamp_bpp;
12134                 }
12135         }
12136 }
12137
12138 static int
12139 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12140                           struct intel_crtc_state *pipe_config)
12141 {
12142         struct drm_device *dev = crtc->base.dev;
12143         struct drm_atomic_state *state;
12144         struct drm_connector *connector;
12145         struct drm_connector_state *connector_state;
12146         int bpp, i;
12147
12148         if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
12149                 bpp = 10*3;
12150         else if (INTEL_INFO(dev)->gen >= 5)
12151                 bpp = 12*3;
12152         else
12153                 bpp = 8*3;
12154
12155
12156         pipe_config->pipe_bpp = bpp;
12157
12158         state = pipe_config->base.state;
12159
12160         /* Clamp display bpp to EDID value */
12161         for_each_connector_in_state(state, connector, connector_state, i) {
12162                 if (connector_state->crtc != &crtc->base)
12163                         continue;
12164
12165                 connected_sink_compute_bpp(to_intel_connector(connector),
12166                                            pipe_config);
12167         }
12168
12169         return bpp;
12170 }
12171
/* Log the hardware (crtc_*) timing fields of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
			"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12182
/*
 * intel_dump_pipe_config - dump a crtc state to the kernel debug log
 * @crtc: intel crtc the state belongs to
 * @pipe_config: crtc state to dump
 * @context: short string identifying where the dump was triggered from
 *
 * Debug aid: prints the interesting fields of @pipe_config — transcoder,
 * bpp, FDI/DP link m/n values, modes/timings, pfit and scaler state,
 * per-platform DPLL hw state — followed by every plane on the crtc's pipe.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
		      crtc->base.base.id, crtc->base.name,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      intel_crtc_has_dp_encoder(pipe_config),
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* second set of DP m/n values (m2_n2) */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      intel_crtc_has_dp_encoder(pipe_config),
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* DPLL hw state layout differs per platform */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	/* Finally dump every plane assigned to this crtc's pipe. */
	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
			      plane->base.id, plane->name);
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->pixel_format));
		/* src coordinates are 16.16 fixed point */
		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
			      state->scaler_id,
			      state->src.x1 >> 16, state->src.y1 >> 16,
			      drm_rect_width(&state->src) >> 16,
			      drm_rect_height(&state->src) >> 16,
			      state->dst.x1, state->dst.y1,
			      drm_rect_width(&state->dst),
			      drm_rect_height(&state->dst));
	}
}
12315
/*
 * check_digital_port_conflicts - reject configs driving one digital port twice
 * @state: atomic state being checked
 *
 * Returns false if two connectors in @state would end up using the same
 * digital port, true otherwise.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fall through: UNKNOWN counts as a digital port on DDI */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
		default:
			break;
		}
	}

	return true;
}
12364
12365 static void
12366 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12367 {
12368         struct drm_crtc_state tmp_state;
12369         struct intel_crtc_scaler_state scaler_state;
12370         struct intel_dpll_hw_state dpll_hw_state;
12371         struct intel_shared_dpll *shared_dpll;
12372         uint32_t ddi_pll_sel;
12373         bool force_thru;
12374
12375         /* FIXME: before the switch to atomic started, a new pipe_config was
12376          * kzalloc'd. Code that depends on any field being zero should be
12377          * fixed, so that the crtc_state can be safely duplicated. For now,
12378          * only fields that are know to not cause problems are preserved. */
12379
12380         tmp_state = crtc_state->base;
12381         scaler_state = crtc_state->scaler_state;
12382         shared_dpll = crtc_state->shared_dpll;
12383         dpll_hw_state = crtc_state->dpll_hw_state;
12384         ddi_pll_sel = crtc_state->ddi_pll_sel;
12385         force_thru = crtc_state->pch_pfit.force_thru;
12386
12387         memset(crtc_state, 0, sizeof *crtc_state);
12388
12389         crtc_state->base = tmp_state;
12390         crtc_state->scaler_state = scaler_state;
12391         crtc_state->shared_dpll = shared_dpll;
12392         crtc_state->dpll_hw_state = dpll_hw_state;
12393         crtc_state->ddi_pll_sel = ddi_pll_sel;
12394         crtc_state->pch_pfit.force_thru = force_thru;
12395 }
12396
/*
 * intel_modeset_pipe_config - compute the new software pipe config for @crtc
 * @crtc: the CRTC being configured
 * @pipe_config: the new crtc state, embedded in a drm_atomic_state
 *
 * Clears stale state, sanitizes sync polarity flags, then lets the
 * encoders (via .compute_config()) and the CRTC negotiate the adjusted
 * mode, bpp and clocks.  An encoder/CRTC may request exactly one retry
 * (RETRY), e.g. when bandwidth constraints force a lower bpp.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	/* Reject invalid cloning early, before any compute_config() hook
	 * runs, and record which output types feed this pipe. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		pipe_config->output_types |= 1 << encoder->type;
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Allow exactly one retry so encoder and CRTC constraints
		 * cannot ping-pong forever. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
12519
12520 static void
12521 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12522 {
12523         struct drm_crtc *crtc;
12524         struct drm_crtc_state *crtc_state;
12525         int i;
12526
12527         /* Double check state. */
12528         for_each_crtc_in_state(state, crtc, crtc_state, i) {
12529                 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12530
12531                 /* Update hwmode for vblank functions */
12532                 if (crtc->state->active)
12533                         crtc->hwmode = crtc->state->adjusted_mode;
12534                 else
12535                         crtc->hwmode.crtc_clock = 0;
12536
12537                 /*
12538                  * Update legacy state to satisfy fbc code. This can
12539                  * be removed when fbc uses the atomic state.
12540                  */
12541                 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12542                         struct drm_plane_state *plane_state = crtc->primary->state;
12543
12544                         crtc->primary->fb = plane_state->fb;
12545                         crtc->x = plane_state->src_x >> 16;
12546                         crtc->y = plane_state->src_y >> 16;
12547                 }
12548         }
12549 }
12550
/*
 * Return true when two clock values are "close enough" to be treated
 * as equal: identical, or within roughly 5% of each other, evaluated
 * with the same integer arithmetic this check has always used.  A
 * zero clock only matches another zero clock.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* Equivalent to delta/sum < ~5%, kept in integer math. */
	return ((delta + sum) * 100) / sum < 105;
}
12568
/*
 * for_each_intel_crtc_masked - iterate over the intel_crtcs whose pipe
 * bit is set in @mask (bit N set means pipe N is visited).
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
12574
12575 static bool
12576 intel_compare_m_n(unsigned int m, unsigned int n,
12577                   unsigned int m2, unsigned int n2,
12578                   bool exact)
12579 {
12580         if (m == m2 && n == n2)
12581                 return true;
12582
12583         if (exact || !m || !n || !m2 || !n2)
12584                 return false;
12585
12586         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12587
12588         if (n > n2) {
12589                 while (n > n2) {
12590                         m2 <<= 1;
12591                         n2 <<= 1;
12592                 }
12593         } else if (n < n2) {
12594                 while (n < n2) {
12595                         m <<= 1;
12596                         n <<= 1;
12597                 }
12598         }
12599
12600         if (n != n2)
12601                 return false;
12602
12603         return intel_fuzzy_clock_check(m, m2);
12604 }
12605
12606 static bool
12607 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12608                        struct intel_link_m_n *m2_n2,
12609                        bool adjust)
12610 {
12611         if (m_n->tu == m2_n2->tu &&
12612             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12613                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12614             intel_compare_m_n(m_n->link_m, m_n->link_n,
12615                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
12616                 if (adjust)
12617                         *m2_n2 = *m_n;
12618
12619                 return true;
12620         }
12621
12622         return false;
12623 }
12624
/*
 * intel_pipe_config_compare - compare two pipe configs field by field
 * @dev: drm device
 * @current_config: the sw-tracked (expected) state
 * @pipe_config: the state to compare against (hw readout, or a new state)
 * @adjust: fastset mode - mismatches are logged at debug level only,
 *          fuzzy M/N comparison is used, and a fuzzy M/N match rewrites
 *          @pipe_config with the canonical values
 *
 * Returns true when the configs match (all checks run; the first
 * mismatch does not short-circuit, so every difference gets logged).
 */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;

/* Mismatches are fatal (DRM_ERROR) during state verification but only
 * informational (debug) when probing for a fastset. */
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
	do { \
		if (!adjust) \
			DRM_ERROR(fmt, ##__VA_ARGS__); \
		else \
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
	} while (0)

#define PIPE_CONF_CHECK_X(name) \
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_I(name) \
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_P(name) \
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %p, found %p)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_M_N(name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

/* Skip a check when either config carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_INFO(dev)->gen < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_INFO(dev)->gen < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/* Geometry/scaler state may legitimately differ during a fastset,
	 * so only check it during full verification. */
	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_I(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_X(ddi_pll_sel);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
#undef INTEL_ERR_OR_DBG_KMS

	return ret;
}
12854
12855 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12856                                            const struct intel_crtc_state *pipe_config)
12857 {
12858         if (pipe_config->has_pch_encoder) {
12859                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12860                                                             &pipe_config->fdi_m_n);
12861                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12862
12863                 /*
12864                  * FDI already provided one idea for the dotclock.
12865                  * Yell if the encoder disagrees.
12866                  */
12867                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12868                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12869                      fdi_dotclock, dotclock);
12870         }
12871 }
12872
/*
 * verify_wm_state - compare sw-tracked DDB allocations with the hardware
 * @crtc: the CRTC to check
 * @new_state: the just-committed crtc state
 *
 * Only meaningful on gen9+ (SKL-style DDB) and only for active pipes;
 * reads the DDB allocation back from hardware and complains about any
 * plane or cursor entry that differs from the software copy.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_ddb_entry *hw_entry, *sw_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_plane(dev_priv, pipe, plane) {
		hw_entry = &hw_ddb.plane[pipe][plane];
		sw_entry = &sw_ddb->plane[pipe][plane];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe), plane + 1,
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}

	/* cursor */
	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12917
12918 static void
12919 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
12920 {
12921         struct drm_connector *connector;
12922
12923         drm_for_each_connector(connector, dev) {
12924                 struct drm_encoder *encoder = connector->encoder;
12925                 struct drm_connector_state *state = connector->state;
12926
12927                 if (state->crtc != crtc)
12928                         continue;
12929
12930                 intel_connector_verify_state(to_intel_connector(connector));
12931
12932                 I915_STATE_WARN(state->best_encoder != encoder,
12933                      "connector's atomic encoder doesn't match legacy encoder\n");
12934         }
12935 }
12936
/*
 * verify_encoder_state - cross-check encoder links and hardware state
 * @dev: drm device
 *
 * For every encoder: each connector pointing at it must agree on the
 * crtc, the encoder's enabled state must match whether any connector
 * uses it, and a detached encoder must be off in hardware.
 */
static void
verify_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* Encoders with no crtc must also be disabled in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
12976
/*
 * verify_crtc_state - compare committed sw state with hardware readout
 * @crtc: the CRTC to verify
 * @old_crtc_state: the pre-commit state; its memory is destroyed and
 *                  reused as scratch space for the hw readout below
 * @new_crtc_state: the just-committed state
 *
 * Reads the full pipe config back from hardware into the recycled
 * old_crtc_state and compares it (active state, per-encoder state,
 * and the full pipe config) against what software thinks it committed.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Free the old state's contents and recycle its allocation as the
	 * destination for the hw readout; only crtc/state are kept. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let active encoders fill in their part of the readout. */
		if (active) {
			pipe_config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, pipe_config);
		}
	}

	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
13047
/*
 * verify_single_dpll_state - check one shared DPLL against hw and sw masks
 * @dev_priv: i915 device
 * @pll: the shared DPLL to verify
 * @crtc: the CRTC whose membership in the PLL's masks is checked, or
 *        NULL to only verify the PLL's global bookkeeping
 * @new_state: the CRTC's new state (used for its active flag); only
 *             meaningful when @crtc is non-NULL
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs may be on without any sw users; skip the on/off
	 * consistency checks for those. */
	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* Without a specific crtc, only sanity-check the global masks. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->config.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->config.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13102
13103 static void
13104 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13105                          struct drm_crtc_state *old_crtc_state,
13106                          struct drm_crtc_state *new_crtc_state)
13107 {
13108         struct drm_i915_private *dev_priv = to_i915(dev);
13109         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13110         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13111
13112         if (new_state->shared_dpll)
13113                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13114
13115         if (old_state->shared_dpll &&
13116             old_state->shared_dpll != new_state->shared_dpll) {
13117                 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13118                 struct intel_shared_dpll *pll = old_state->shared_dpll;
13119
13120                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13121                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13122                                 pipe_name(drm_crtc_index(crtc)));
13123                 I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13124                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13125                                 pipe_name(drm_crtc_index(crtc)));
13126         }
13127 }
13128
13129 static void
13130 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13131                          struct drm_crtc_state *old_state,
13132                          struct drm_crtc_state *new_state)
13133 {
13134         if (!needs_modeset(new_state) &&
13135             !to_intel_crtc_state(new_state)->update_pipe)
13136                 return;
13137
13138         verify_wm_state(crtc, new_state);
13139         verify_connector_state(crtc->dev, crtc);
13140         verify_crtc_state(crtc, old_state, new_state);
13141         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13142 }
13143
13144 static void
13145 verify_disabled_dpll_state(struct drm_device *dev)
13146 {
13147         struct drm_i915_private *dev_priv = to_i915(dev);
13148         int i;
13149
13150         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13151                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13152 }
13153
/*
 * Verify the parts of the hw/sw state that are not tied to a specific
 * crtc: encoder links, connectors without a crtc, and all shared DPLLs.
 * Called once per modeset after the disable phase.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev)
{
	verify_encoder_state(dev);
	verify_connector_state(dev, NULL);
	verify_disabled_dpll_state(dev);
}
13161
13162 static void update_scanline_offset(struct intel_crtc *crtc)
13163 {
13164         struct drm_device *dev = crtc->base.dev;
13165
13166         /*
13167          * The scanline counter increments at the leading edge of hsync.
13168          *
13169          * On most platforms it starts counting from vtotal-1 on the
13170          * first active line. That means the scanline counter value is
13171          * always one less than what we would expect. Ie. just after
13172          * start of vblank, which also occurs at start of hsync (on the
13173          * last active line), the scanline counter will read vblank_start-1.
13174          *
13175          * On gen2 the scanline counter starts counting from 1 instead
13176          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13177          * to keep the value positive), instead of adding one.
13178          *
13179          * On HSW+ the behaviour of the scanline counter depends on the output
13180          * type. For DP ports it behaves like most other platforms, but on HDMI
13181          * there's an extra 1 line difference. So we need to add two instead of
13182          * one to the value.
13183          */
13184         if (IS_GEN2(dev)) {
13185                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13186                 int vtotal;
13187
13188                 vtotal = adjusted_mode->crtc_vtotal;
13189                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13190                         vtotal /= 2;
13191
13192                 crtc->scanline_offset = vtotal - 1;
13193         } else if (HAS_DDI(dev) &&
13194                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
13195                 crtc->scanline_offset = 2;
13196         } else
13197                 crtc->scanline_offset = 1;
13198 }
13199
/*
 * Drop the shared-DPLL references of every crtc that is undergoing a
 * full modeset, so that a fresh pll can be (re)assigned later during
 * compute_config. Only relevant on platforms with a crtc_compute_clock
 * hook (i.e. shared DPLL management).
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		/*
		 * Note: the old pll comes from the *current* crtc->state,
		 * not from the new crtc_state being checked — the reference
		 * being released is the one the hardware still holds.
		 */
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		/* New state starts with no pll; one is picked later if needed. */
		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		/* Lazily duplicate the pll config state into this commit. */
		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
	}
}
13231
13232 /*
13233  * This implements the workaround described in the "notes" section of the mode
13234  * set sequence documentation. When going from no pipes or single pipe to
13235  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13236  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13237  */
13238 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13239 {
13240         struct drm_crtc_state *crtc_state;
13241         struct intel_crtc *intel_crtc;
13242         struct drm_crtc *crtc;
13243         struct intel_crtc_state *first_crtc_state = NULL;
13244         struct intel_crtc_state *other_crtc_state = NULL;
13245         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13246         int i;
13247
13248         /* look at all crtc's that are going to be enabled in during modeset */
13249         for_each_crtc_in_state(state, crtc, crtc_state, i) {
13250                 intel_crtc = to_intel_crtc(crtc);
13251
13252                 if (!crtc_state->active || !needs_modeset(crtc_state))
13253                         continue;
13254
13255                 if (first_crtc_state) {
13256                         other_crtc_state = to_intel_crtc_state(crtc_state);
13257                         break;
13258                 } else {
13259                         first_crtc_state = to_intel_crtc_state(crtc_state);
13260                         first_pipe = intel_crtc->pipe;
13261                 }
13262         }
13263
13264         /* No workaround needed? */
13265         if (!first_crtc_state)
13266                 return 0;
13267
13268         /* w/a possibly needed, check how many crtc's are already enabled. */
13269         for_each_intel_crtc(state->dev, intel_crtc) {
13270                 struct intel_crtc_state *pipe_config;
13271
13272                 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13273                 if (IS_ERR(pipe_config))
13274                         return PTR_ERR(pipe_config);
13275
13276                 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13277
13278                 if (!pipe_config->base.active ||
13279                     needs_modeset(&pipe_config->base))
13280                         continue;
13281
13282                 /* 2 or more enabled crtcs means no need for w/a */
13283                 if (enabled_pipe != INVALID_PIPE)
13284                         return 0;
13285
13286                 enabled_pipe = intel_crtc->pipe;
13287         }
13288
13289         if (enabled_pipe != INVALID_PIPE)
13290                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13291         else if (other_crtc_state)
13292                 other_crtc_state->hsw_workaround_pipe = first_pipe;
13293
13294         return 0;
13295 }
13296
13297 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13298 {
13299         struct drm_crtc *crtc;
13300         struct drm_crtc_state *crtc_state;
13301         int ret = 0;
13302
13303         /* add all active pipes to the state */
13304         for_each_crtc(state->dev, crtc) {
13305                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13306                 if (IS_ERR(crtc_state))
13307                         return PTR_ERR(crtc_state);
13308
13309                 if (!crtc_state->active || needs_modeset(crtc_state))
13310                         continue;
13311
13312                 crtc_state->mode_changed = true;
13313
13314                 ret = drm_atomic_add_affected_connectors(state, crtc);
13315                 if (ret)
13316                         break;
13317
13318                 ret = drm_atomic_add_affected_planes(state, crtc);
13319                 if (ret)
13320                         break;
13321         }
13322
13323         return ret;
13324 }
13325
/*
 * Perform the extra validation needed when at least one crtc requires a
 * full modeset: digital port conflict checking, tracking of the new
 * active-crtc bitmask, cdclk recomputation and shared-pll release.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the hw's current active mask, then apply this state. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (crtc_state->active != crtc->state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		/* Fall back to the current, then the preferred, pll vco. */
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Changing the effective cdclk (or its pll vco) affects
		 * every pipe, so they must all go through a full modeset.
		 */
		if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;

		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
			      intel_state->cdclk, intel_state->dev_cdclk);
	} else
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13388
13389 /*
13390  * Handle calculation of various watermark data at the end of the atomic check
13391  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13392  * handlers to ensure that all derived state has been updated.
13393  */
13394 static int calc_watermark_data(struct drm_atomic_state *state)
13395 {
13396         struct drm_device *dev = state->dev;
13397         struct drm_i915_private *dev_priv = to_i915(dev);
13398
13399         /* Is there platform-specific watermark information to calculate? */
13400         if (dev_priv->display.compute_global_watermarks)
13401                 return dev_priv->display.compute_global_watermarks(state);
13402
13403         return 0;
13404 }
13405
13406 /**
13407  * intel_atomic_check - validate state object
13408  * @dev: drm device
13409  * @state: state to validate
13410  */
13411 static int intel_atomic_check(struct drm_device *dev,
13412                               struct drm_atomic_state *state)
13413 {
13414         struct drm_i915_private *dev_priv = to_i915(dev);
13415         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13416         struct drm_crtc *crtc;
13417         struct drm_crtc_state *crtc_state;
13418         int ret, i;
13419         bool any_ms = false;
13420
13421         ret = drm_atomic_helper_check_modeset(dev, state);
13422         if (ret)
13423                 return ret;
13424
13425         for_each_crtc_in_state(state, crtc, crtc_state, i) {
13426                 struct intel_crtc_state *pipe_config =
13427                         to_intel_crtc_state(crtc_state);
13428
13429                 /* Catch I915_MODE_FLAG_INHERITED */
13430                 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13431                         crtc_state->mode_changed = true;
13432
13433                 if (!needs_modeset(crtc_state))
13434                         continue;
13435
13436                 if (!crtc_state->enable) {
13437                         any_ms = true;
13438                         continue;
13439                 }
13440
13441                 /* FIXME: For only active_changed we shouldn't need to do any
13442                  * state recomputation at all. */
13443
13444                 ret = drm_atomic_add_affected_connectors(state, crtc);
13445                 if (ret)
13446                         return ret;
13447
13448                 ret = intel_modeset_pipe_config(crtc, pipe_config);
13449                 if (ret) {
13450                         intel_dump_pipe_config(to_intel_crtc(crtc),
13451                                                pipe_config, "[failed]");
13452                         return ret;
13453                 }
13454
13455                 if (i915.fastboot &&
13456                     intel_pipe_config_compare(dev,
13457                                         to_intel_crtc_state(crtc->state),
13458                                         pipe_config, true)) {
13459                         crtc_state->mode_changed = false;
13460                         to_intel_crtc_state(crtc_state)->update_pipe = true;
13461                 }
13462
13463                 if (needs_modeset(crtc_state))
13464                         any_ms = true;
13465
13466                 ret = drm_atomic_add_affected_planes(state, crtc);
13467                 if (ret)
13468                         return ret;
13469
13470                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13471                                        needs_modeset(crtc_state) ?
13472                                        "[modeset]" : "[fastset]");
13473         }
13474
13475         if (any_ms) {
13476                 ret = intel_modeset_checks(state);
13477
13478                 if (ret)
13479                         return ret;
13480         } else
13481                 intel_state->cdclk = dev_priv->cdclk_freq;
13482
13483         ret = drm_atomic_helper_check_planes(dev, state);
13484         if (ret)
13485                 return ret;
13486
13487         intel_fbc_choose_crtc(dev_priv, state);
13488         return calc_watermark_data(state);
13489 }
13490
/*
 * Prepare an atomic commit: wait out pending page flips, pin/prepare the
 * planes' framebuffers, and for blocking commits also wait for any
 * outstanding GPU rendering on the new framebuffers. Returns 0 on success
 * or a negative error code (planes are cleaned up on wait failure).
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool nonblock)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Cursor-only updates don't need to wait for flips. */
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* Throttle: drain the unpin workqueue if it's backing up. */
		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	/* Blocking commits wait for rendering here; nonblocking waits later. */
	if (!ret && !nonblock) {
		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  true, NULL, NULL);
			if (ret) {
				/* Any hang should be swallowed by the wait */
				WARN_ON(ret == -EIO);
				mutex_lock(&dev->struct_mutex);
				drm_atomic_helper_cleanup_planes(dev, state);
				mutex_unlock(&dev->struct_mutex);
				break;
			}
		}
	}

	return ret;
}
13544
13545 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13546 {
13547         struct drm_device *dev = crtc->base.dev;
13548
13549         if (!dev->max_vblank_count)
13550                 return drm_accurate_vblank_count(&crtc->base);
13551
13552         return dev->driver->get_vblank_counter(dev, crtc->pipe);
13553 }
13554
/*
 * Wait for one vblank on every pipe in @crtc_mask. Vblank references are
 * grabbed for all pipes first (sampling the current counts), then each
 * pipe is waited on in turn, so the waits overlap rather than serialize.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	/* Pass 1: enable vblank irqs and snapshot the current counts. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (WARN_ON(ret != 0)) {
			/* Drop the pipe so pass 2 doesn't vblank_put it. */
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
	}

	/* Pass 2: wait for each count to advance, then drop the reference. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

		drm_crtc_vblank_put(crtc);
	}
}
13598
13599 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13600 {
13601         /* fb updated, need to unpin old fb */
13602         if (crtc_state->fb_changed)
13603                 return true;
13604
13605         /* wm changes, need vblank before final wm's */
13606         if (crtc_state->update_wm_post)
13607                 return true;
13608
13609         /*
13610          * cxsr is re-enabled after vblank.
13611          * This is already handled by crtc_state->update_wm_post,
13612          * but added for clarity.
13613          */
13614         if (crtc_state->disable_cxsr)
13615                 return true;
13616
13617         return false;
13618 }
13619
/*
 * The tail of an atomic commit: performed either directly (blocking
 * commits) or from the commit worker (nonblocking). Disables the
 * outgoing pipes, updates global state (cdclk, legacy modeset state),
 * enables the new pipes and planes, waits for vblanks, then runs the
 * delayed watermark/cleanup work and verification.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool hw_check = intel_state->modeset;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;
	int i, ret;

	/* Wait for any remaining rendering on the new framebuffers. */
	for_each_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane_state);

		if (!intel_plane_state->wait_req)
			continue;

		ret = __i915_wait_request(intel_plane_state->wait_req,
					  true, NULL, NULL);
		/* EIO should be eaten, and we can't get interrupted in the
		 * worker, and blocking commits have waited already. */
		WARN_ON(ret);
	}

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset) {
		/* Commit the new global sw state before touching hardware. */
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;

		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* Disable phase: tear down every pipe that needs a full modeset. */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(crtc->state) ||
		    to_intel_crtc_state(crtc->state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(crtc->state));
		}

		if (!needs_modeset(crtc->state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		/* Reprogram cdclk while all pipes are off, if it changed. */
		if (dev_priv->display.modeset_commit_cdclk &&
		    (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
			dev_priv->display.modeset_commit_cdclk(state);

		intel_modeset_verify_disabled(dev);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc->state);

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		/* Complete events for now disable pipes here. */
		if (modeset && !crtc->state->active && crtc->state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
			spin_unlock_irq(&dev->event_lock);

			crtc->state->event = NULL;
		}

		if (!modeset)
			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));

		if (crtc->state->active &&
		    drm_atomic_get_existing_plane_state(state, crtc->primary))
			intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));

		if (crtc->state->active)
			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

		/* Collect the pipes that must see a vblank before cleanup. */
		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
			crtc_vblank_mask |= 1 << i;
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(crtc->state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_cstate);
	}

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
	}

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_free(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
}
13807
13808 static void intel_atomic_commit_work(struct work_struct *work)
13809 {
13810         struct drm_atomic_state *state = container_of(work,
13811                                                       struct drm_atomic_state,
13812                                                       commit_work);
13813         intel_atomic_commit_tail(state);
13814 }
13815
13816 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13817 {
13818         struct drm_plane_state *old_plane_state;
13819         struct drm_plane *plane;
13820         struct drm_i915_gem_object *obj, *old_obj;
13821         struct intel_plane *intel_plane;
13822         int i;
13823
13824         mutex_lock(&state->dev->struct_mutex);
13825         for_each_plane_in_state(state, plane, old_plane_state, i) {
13826                 obj = intel_fb_obj(plane->state->fb);
13827                 old_obj = intel_fb_obj(old_plane_state->fb);
13828                 intel_plane = to_intel_plane(plane);
13829
13830                 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13831         }
13832         mutex_unlock(&state->dev->struct_mutex);
13833 }
13834
13835 /**
13836  * intel_atomic_commit - commit validated state object
13837  * @dev: DRM device
13838  * @state: the top-level driver state object
13839  * @nonblock: nonblocking commit
13840  *
13841  * This function commits a top-level state object that has been validated
13842  * with drm_atomic_helper_check().
13843  *
13844  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13845  * nonblocking commits are only safe for pure plane updates. Everything else
13846  * should work though.
13847  *
13848  * RETURNS
13849  * Zero for success or -errno.
13850  */
13851 static int intel_atomic_commit(struct drm_device *dev,
13852                                struct drm_atomic_state *state,
13853                                bool nonblock)
13854 {
13855         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13856         struct drm_i915_private *dev_priv = to_i915(dev);
13857         int ret = 0;
13858
13859         if (intel_state->modeset && nonblock) {
13860                 DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
13861                 return -EINVAL;
13862         }
13863
13864         ret = drm_atomic_helper_setup_commit(state, nonblock);
13865         if (ret)
13866                 return ret;
13867
13868         INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13869
13870         ret = intel_atomic_prepare_commit(dev, state, nonblock);
13871         if (ret) {
13872                 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13873                 return ret;
13874         }
13875
13876         drm_atomic_helper_swap_state(state, true);
13877         dev_priv->wm.distrust_bios_wm = false;
13878         dev_priv->wm.skl_results = intel_state->wm_results;
13879         intel_shared_dpll_commit(state);
13880         intel_atomic_track_fbs(state);
13881
13882         if (nonblock)
13883                 queue_work(system_unbound_wq, &state->commit_work);
13884         else
13885                 intel_atomic_commit_tail(state);
13886
13887         return 0;
13888 }
13889
/*
 * Force a full modeset on @crtc by re-committing its current state with
 * mode_changed set. Best-effort: failures other than -EDEADLK are simply
 * dropped after freeing the state.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Nothing to restore on an inactive crtc; free and bail. */
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	/* Lock contention with a concurrent modeset: back off and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/*
	 * On success drm_atomic_commit() has taken ownership of @state, so
	 * only free it on failure. Note the "out" label deliberately sits
	 * inside the if to share the free with the inactive-crtc path.
	 */
	if (ret)
out:
		drm_atomic_state_free(state);
}
13927
13928 #undef for_each_intel_crtc_masked
13929
/* CRTC vfunc table; legacy entry points are routed through the atomic
 * helpers, with i915-specific state duplication/destruction and page flip. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
13939
13940 /**
13941  * intel_prepare_plane_fb - Prepare fb for usage on plane
13942  * @plane: drm plane to prepare for
13943  * @fb: framebuffer to prepare for presentation
13944  *
13945  * Prepares a framebuffer for usage on a display plane.  Generally this
13946  * involves pinning the underlying object and updating the frontbuffer tracking
13947  * bits.  Some older platforms need special physical address handling for
13948  * cursor planes.
13949  *
13950  * Must be called with struct_mutex held.
13951  *
13952  * Returns 0 on success, negative error code on failure.
13953  */
13954 int
13955 intel_prepare_plane_fb(struct drm_plane *plane,
13956                        const struct drm_plane_state *new_state)
13957 {
13958         struct drm_device *dev = plane->dev;
13959         struct drm_framebuffer *fb = new_state->fb;
13960         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13961         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13962         struct reservation_object *resv;
13963         int ret = 0;
13964
13965         if (!obj && !old_obj)
13966                 return 0;
13967
13968         if (old_obj) {
13969                 struct drm_crtc_state *crtc_state =
13970                         drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13971
13972                 /* Big Hammer, we also need to ensure that any pending
13973                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13974                  * current scanout is retired before unpinning the old
13975                  * framebuffer. Note that we rely on userspace rendering
13976                  * into the buffer attached to the pipe they are waiting
13977                  * on. If not, userspace generates a GPU hang with IPEHR
13978                  * point to the MI_WAIT_FOR_EVENT.
13979                  *
13980                  * This should only fail upon a hung GPU, in which case we
13981                  * can safely continue.
13982                  */
13983                 if (needs_modeset(crtc_state))
13984                         ret = i915_gem_object_wait_rendering(old_obj, true);
13985                 if (ret) {
13986                         /* GPU hangs should have been swallowed by the wait */
13987                         WARN_ON(ret == -EIO);
13988                         return ret;
13989                 }
13990         }
13991
13992         if (!obj)
13993                 return 0;
13994
13995         /* For framebuffer backed by dmabuf, wait for fence */
13996         resv = i915_gem_object_get_dmabuf_resv(obj);
13997         if (resv) {
13998                 long lret;
13999
14000                 lret = reservation_object_wait_timeout_rcu(resv, false, true,
14001                                                            MAX_SCHEDULE_TIMEOUT);
14002                 if (lret == -ERESTARTSYS)
14003                         return lret;
14004
14005                 WARN(lret < 0, "waiting returns %li\n", lret);
14006         }
14007
14008         if (plane->type == DRM_PLANE_TYPE_CURSOR &&
14009             INTEL_INFO(dev)->cursor_needs_physical) {
14010                 int align = IS_I830(dev) ? 16 * 1024 : 256;
14011                 ret = i915_gem_object_attach_phys(obj, align);
14012                 if (ret)
14013                         DRM_DEBUG_KMS("failed to attach phys object\n");
14014         } else {
14015                 ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
14016         }
14017
14018         if (ret == 0) {
14019                 struct intel_plane_state *plane_state =
14020                         to_intel_plane_state(new_state);
14021
14022                 i915_gem_request_assign(&plane_state->wait_req,
14023                                         obj->last_write_req);
14024         }
14025
14026         return ret;
14027 }
14028
14029 /**
14030  * intel_cleanup_plane_fb - Cleans up an fb after plane use
14031  * @plane: drm plane to clean up for
14032  * @fb: old framebuffer that was on plane
14033  *
14034  * Cleans up a framebuffer that has just been removed from a plane.
14035  *
14036  * Must be called with struct_mutex held.
14037  */
14038 void
14039 intel_cleanup_plane_fb(struct drm_plane *plane,
14040                        const struct drm_plane_state *old_state)
14041 {
14042         struct drm_device *dev = plane->dev;
14043         struct intel_plane_state *old_intel_state;
14044         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
14045         struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
14046
14047         old_intel_state = to_intel_plane_state(old_state);
14048
14049         if (!obj && !old_obj)
14050                 return;
14051
14052         if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
14053             !INTEL_INFO(dev)->cursor_needs_physical))
14054                 intel_unpin_fb_obj(old_state->fb, old_state->rotation);
14055
14056         i915_gem_request_assign(&old_intel_state->wait_req, NULL);
14057 }
14058
/*
 * skl_max_scale - maximum downscale ratio for a SKL+ pipe scaler
 * @intel_crtc: the crtc the scaler belongs to (may be NULL)
 * @crtc_state: state providing the pixel clock and cdclk
 *
 * Returns the maximum source/destination scale factor in 16.16 fixed
 * point, or DRM_PLANE_HELPER_NO_SCALING when scaling is unavailable.
 */
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	/* A zero clock or cdclk below the pixel rate would be a state bug. */
	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}
14084
/*
 * Atomic check for the primary plane: compute the allowed scaling range
 * and validate position/size via the DRM plane helper.
 */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;

	/* Only gen9+ can scale/freely position the primary plane. */
	if (INTEL_INFO(plane->dev)->gen >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					     &state->dst, &state->clip,
					     state->base.rotation,
					     min_scale, max_scale,
					     can_position, true,
					     &state->visible);
}
14112
/*
 * Start of the per-crtc plane commit: open the vblank-evasion critical
 * section and perform pipe-level updates that must land in the same
 * vblank as the plane writes. Closed by intel_finish_crtc_commit().
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* Full modesets do their pipe programming elsewhere. */
	if (modeset)
		return;

	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}
14138
14139 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14140                                      struct drm_crtc_state *old_crtc_state)
14141 {
14142         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14143
14144         intel_pipe_update_end(intel_crtc, NULL);
14145 }
14146
14147 /**
14148  * intel_plane_destroy - destroy a plane
14149  * @plane: plane to destroy
14150  *
14151  * Common destruction function for all types of planes (primary, cursor,
14152  * sprite).
14153  */
14154 void intel_plane_destroy(struct drm_plane *plane)
14155 {
14156         if (!plane)
14157                 return;
14158
14159         drm_plane_cleanup(plane);
14160         kfree(to_intel_plane(plane));
14161 }
14162
/* Plane vfunc table shared by primary, cursor and sprite planes; legacy
 * entry points go through the atomic helpers. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};
14174
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * platform-specific format list and update/disable hooks. Returns the
 * plane or NULL on failure (allocation or drm init error).
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary)
		goto fail;

	state = intel_create_plane_state(&primary->base);
	if (!state)
		goto fail;
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	/* Gen9+ primaries can use the shared pipe scalers. */
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	/* gen2/3 FBC: plane A feeds pipe B, so swap (see intel_crtc_init). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	/* Pick formats and plane hooks per platform generation. */
	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	/* Plane name follows the hw naming scheme of each generation
	 * ("plane 1A" on gen9+, "primary A" on ilk+/g4x, "plane A" before). */
	if (INTEL_INFO(dev)->gen >= 9)
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;

fail:
	kfree(state);
	kfree(primary);

	return NULL;
}
14266
14267 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14268 {
14269         if (!dev->mode_config.rotation_property) {
14270                 unsigned long flags = BIT(DRM_ROTATE_0) |
14271                         BIT(DRM_ROTATE_180);
14272
14273                 if (INTEL_INFO(dev)->gen >= 9)
14274                         flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14275
14276                 dev->mode_config.rotation_property =
14277                         drm_mode_create_rotation_property(dev, flags);
14278         }
14279         if (dev->mode_config.rotation_property)
14280                 drm_object_attach_property(&plane->base.base,
14281                                 dev->mode_config.rotation_property,
14282                                 plane->base.state->rotation);
14283 }
14284
/*
 * Atomic check for the cursor plane: validate position via the helper
 * (no scaling allowed), then enforce hw cursor constraints (supported
 * dimensions, buffer size, linear tiling, CHV pipe C quirk).
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    state->base.rotation,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Hw cursor stride is the width rounded up to a power of two,
	 * at 4 bytes per pixel; the object must cover every scanline. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
14346
14347 static void
14348 intel_disable_cursor_plane(struct drm_plane *plane,
14349                            struct drm_crtc *crtc)
14350 {
14351         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14352
14353         intel_crtc->cursor_addr = 0;
14354         intel_crtc_update_cursor(crtc, NULL);
14355 }
14356
14357 static void
14358 intel_update_cursor_plane(struct drm_plane *plane,
14359                           const struct intel_crtc_state *crtc_state,
14360                           const struct intel_plane_state *state)
14361 {
14362         struct drm_crtc *crtc = crtc_state->base.crtc;
14363         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14364         struct drm_device *dev = plane->dev;
14365         struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14366         uint32_t addr;
14367
14368         if (!obj)
14369                 addr = 0;
14370         else if (!INTEL_INFO(dev)->cursor_needs_physical)
14371                 addr = i915_gem_obj_ggtt_offset(obj);
14372         else
14373                 addr = obj->phys_handle->busaddr;
14374
14375         intel_crtc->cursor_addr = addr;
14376         intel_crtc_update_cursor(crtc, state);
14377 }
14378
/*
 * Allocate and register the cursor plane for @pipe. Returns the plane
 * or NULL on failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor)
		goto fail;

	state = intel_create_plane_state(&cursor->base);
	if (!state)
		goto fail;
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(dev, &cursor->base, 0,
				       &intel_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Open-coded rather than intel_create_rotation_property() so the
	 * cursor never advertises 90/270 rotation, even on gen9+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	/* No pipe scaler assigned to the cursor. */
	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;

fail:
	kfree(state);
	kfree(cursor);

	return NULL;
}
14438
14439 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14440         struct intel_crtc_state *crtc_state)
14441 {
14442         int i;
14443         struct intel_scaler *intel_scaler;
14444         struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14445
14446         for (i = 0; i < intel_crtc->num_scalers; i++) {
14447                 intel_scaler = &scaler_state->scalers[i];
14448                 intel_scaler->in_use = 0;
14449                 intel_scaler->mode = PS_SCALER_MODE_DYN;
14450         }
14451
14452         scaler_state->scaler_id = -1;
14453 }
14454
/*
 * Allocate and register the crtc for @pipe together with its primary
 * and cursor planes, and record the pipe/plane -> crtc mappings.
 * Failures are silent (the crtc simply isn't registered).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C has one scaler; pipes A/B have SKL_NUM_SCALERS. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* Invalidate the cursor register caches so the first update
	 * programs the hardware unconditionally. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	/* Each plane/pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* Much code relies on drm_crtc_index(crtc) == crtc->pipe. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	/* intel_plane_destroy() tolerates NULL planes. */
	intel_plane_destroy(primary);
	intel_plane_destroy(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
14534
/*
 * Return the pipe the given connector is currently driven by, or
 * INVALID_PIPE when the connector has no (valid) encoder/crtc.
 * Caller must hold connection_mutex.
 */
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}
14547
14548 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14549                                 struct drm_file *file)
14550 {
14551         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14552         struct drm_crtc *drmmode_crtc;
14553         struct intel_crtc *crtc;
14554
14555         drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14556         if (!drmmode_crtc)
14557                 return -ENOENT;
14558
14559         crtc = to_intel_crtc(drmmode_crtc);
14560         pipe_from_crtc_id->pipe = crtc->pipe;
14561
14562         return 0;
14563 }
14564
14565 static int intel_encoder_clones(struct intel_encoder *encoder)
14566 {
14567         struct drm_device *dev = encoder->base.dev;
14568         struct intel_encoder *source_encoder;
14569         int index_mask = 0;
14570         int entry = 0;
14571
14572         for_each_intel_encoder(dev, source_encoder) {
14573                 if (encoders_cloneable(encoder, source_encoder))
14574                         index_mask |= (1 << entry);
14575
14576                 entry++;
14577         }
14578
14579         return index_mask;
14580 }
14581
14582 static bool has_edp_a(struct drm_device *dev)
14583 {
14584         struct drm_i915_private *dev_priv = to_i915(dev);
14585
14586         if (!IS_MOBILE(dev))
14587                 return false;
14588
14589         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14590                 return false;
14591
14592         if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14593                 return false;
14594
14595         return true;
14596 }
14597
/*
 * Whether the platform has a usable analog CRT (VGA) output. Rules out
 * generations/SKUs without one and consults fuse straps and the VBT.
 */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Gen9+ dropped the dedicated CRT DAC entirely. */
	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	/* ULT parts have no CRT. */
	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* VBT can declare the board has no internal CRT support. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
14623
/*
 * Probe for and register every display output encoder (LVDS/eDP, CRT,
 * DDI, SDVO, HDMI, DP, DSI, TV, DVO) appropriate for this platform,
 * then fill in each encoder's possible_crtcs/possible_clones masks.
 *
 * NOTE(review): registration ORDER is load-bearing here (see the LVDS
 * comment below, and DP-before-HDMI on shared ports) — do not reorder.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D carries either eDP or HDMI, never both. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* With all encoders registered, compute their crtc/clone masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14808
14809 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14810 {
14811         struct drm_device *dev = fb->dev;
14812         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14813
14814         drm_framebuffer_cleanup(fb);
14815         mutex_lock(&dev->struct_mutex);
14816         WARN_ON(!intel_fb->obj->framebuffer_references--);
14817         drm_gem_object_unreference(&intel_fb->obj->base);
14818         mutex_unlock(&dev->struct_mutex);
14819         kfree(intel_fb);
14820 }
14821
14822 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14823                                                 struct drm_file *file,
14824                                                 unsigned int *handle)
14825 {
14826         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14827         struct drm_i915_gem_object *obj = intel_fb->obj;
14828
14829         if (obj->userptr.mm) {
14830                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14831                 return -EINVAL;
14832         }
14833
14834         return drm_gem_handle_create(file, &obj->base, handle);
14835 }
14836
14837 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14838                                         struct drm_file *file,
14839                                         unsigned flags, unsigned color,
14840                                         struct drm_clip_rect *clips,
14841                                         unsigned num_clips)
14842 {
14843         struct drm_device *dev = fb->dev;
14844         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14845         struct drm_i915_gem_object *obj = intel_fb->obj;
14846
14847         mutex_lock(&dev->struct_mutex);
14848         intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14849         mutex_unlock(&dev->struct_mutex);
14850
14851         return 0;
14852 }
14853
/* Framebuffer vtable shared by every framebuffer created by this driver. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14859
14860 static
14861 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14862                          uint32_t pixel_format)
14863 {
14864         u32 gen = INTEL_INFO(dev)->gen;
14865
14866         if (gen >= 9) {
14867                 int cpp = drm_format_plane_cpp(pixel_format, 0);
14868
14869                 /* "The stride in bytes must not exceed the of the size of 8K
14870                  *  pixels and 32K bytes."
14871                  */
14872                 return min(8192 * cpp, 32768);
14873         } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14874                 return 32*1024;
14875         } else if (gen >= 4) {
14876                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14877                         return 16*1024;
14878                 else
14879                         return 32*1024;
14880         } else if (gen >= 3) {
14881                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14882                         return 8*1024;
14883                 else
14884                         return 16*1024;
14885         } else {
14886                 /* XXX DSPC is limited to 4k tiled */
14887                 return 8*1024;
14888         }
14889 }
14890
14891 static int intel_framebuffer_init(struct drm_device *dev,
14892                                   struct intel_framebuffer *intel_fb,
14893                                   struct drm_mode_fb_cmd2 *mode_cmd,
14894                                   struct drm_i915_gem_object *obj)
14895 {
14896         struct drm_i915_private *dev_priv = to_i915(dev);
14897         unsigned int aligned_height;
14898         int ret;
14899         u32 pitch_limit, stride_alignment;
14900
14901         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14902
14903         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14904                 /* Enforce that fb modifier and tiling mode match, but only for
14905                  * X-tiled. This is needed for FBC. */
14906                 if (!!(obj->tiling_mode == I915_TILING_X) !=
14907                     !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14908                         DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14909                         return -EINVAL;
14910                 }
14911         } else {
14912                 if (obj->tiling_mode == I915_TILING_X)
14913                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14914                 else if (obj->tiling_mode == I915_TILING_Y) {
14915                         DRM_DEBUG("No Y tiling for legacy addfb\n");
14916                         return -EINVAL;
14917                 }
14918         }
14919
14920         /* Passed in modifier sanity checking. */
14921         switch (mode_cmd->modifier[0]) {
14922         case I915_FORMAT_MOD_Y_TILED:
14923         case I915_FORMAT_MOD_Yf_TILED:
14924                 if (INTEL_INFO(dev)->gen < 9) {
14925                         DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14926                                   mode_cmd->modifier[0]);
14927                         return -EINVAL;
14928                 }
14929         case DRM_FORMAT_MOD_NONE:
14930         case I915_FORMAT_MOD_X_TILED:
14931                 break;
14932         default:
14933                 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14934                           mode_cmd->modifier[0]);
14935                 return -EINVAL;
14936         }
14937
14938         stride_alignment = intel_fb_stride_alignment(dev_priv,
14939                                                      mode_cmd->modifier[0],
14940                                                      mode_cmd->pixel_format);
14941         if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14942                 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14943                           mode_cmd->pitches[0], stride_alignment);
14944                 return -EINVAL;
14945         }
14946
14947         pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14948                                            mode_cmd->pixel_format);
14949         if (mode_cmd->pitches[0] > pitch_limit) {
14950                 DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14951                           mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14952                           "tiled" : "linear",
14953                           mode_cmd->pitches[0], pitch_limit);
14954                 return -EINVAL;
14955         }
14956
14957         if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14958             mode_cmd->pitches[0] != obj->stride) {
14959                 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14960                           mode_cmd->pitches[0], obj->stride);
14961                 return -EINVAL;
14962         }
14963
14964         /* Reject formats not supported by any plane early. */
14965         switch (mode_cmd->pixel_format) {
14966         case DRM_FORMAT_C8:
14967         case DRM_FORMAT_RGB565:
14968         case DRM_FORMAT_XRGB8888:
14969         case DRM_FORMAT_ARGB8888:
14970                 break;
14971         case DRM_FORMAT_XRGB1555:
14972                 if (INTEL_INFO(dev)->gen > 3) {
14973                         DRM_DEBUG("unsupported pixel format: %s\n",
14974                                   drm_get_format_name(mode_cmd->pixel_format));
14975                         return -EINVAL;
14976                 }
14977                 break;
14978         case DRM_FORMAT_ABGR8888:
14979                 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14980                     INTEL_INFO(dev)->gen < 9) {
14981                         DRM_DEBUG("unsupported pixel format: %s\n",
14982                                   drm_get_format_name(mode_cmd->pixel_format));
14983                         return -EINVAL;
14984                 }
14985                 break;
14986         case DRM_FORMAT_XBGR8888:
14987         case DRM_FORMAT_XRGB2101010:
14988         case DRM_FORMAT_XBGR2101010:
14989                 if (INTEL_INFO(dev)->gen < 4) {
14990                         DRM_DEBUG("unsupported pixel format: %s\n",
14991                                   drm_get_format_name(mode_cmd->pixel_format));
14992                         return -EINVAL;
14993                 }
14994                 break;
14995         case DRM_FORMAT_ABGR2101010:
14996                 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14997                         DRM_DEBUG("unsupported pixel format: %s\n",
14998                                   drm_get_format_name(mode_cmd->pixel_format));
14999                         return -EINVAL;
15000                 }
15001                 break;
15002         case DRM_FORMAT_YUYV:
15003         case DRM_FORMAT_UYVY:
15004         case DRM_FORMAT_YVYU:
15005         case DRM_FORMAT_VYUY:
15006                 if (INTEL_INFO(dev)->gen < 5) {
15007                         DRM_DEBUG("unsupported pixel format: %s\n",
15008                                   drm_get_format_name(mode_cmd->pixel_format));
15009                         return -EINVAL;
15010                 }
15011                 break;
15012         default:
15013                 DRM_DEBUG("unsupported pixel format: %s\n",
15014                           drm_get_format_name(mode_cmd->pixel_format));
15015                 return -EINVAL;
15016         }
15017
15018         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15019         if (mode_cmd->offsets[0] != 0)
15020                 return -EINVAL;
15021
15022         aligned_height = intel_fb_align_height(dev, mode_cmd->height,
15023                                                mode_cmd->pixel_format,
15024                                                mode_cmd->modifier[0]);
15025         /* FIXME drm helper for size checks (especially planar formats)? */
15026         if (obj->base.size < aligned_height * mode_cmd->pitches[0])
15027                 return -EINVAL;
15028
15029         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
15030         intel_fb->obj = obj;
15031
15032         intel_fill_fb_info(dev_priv, &intel_fb->base);
15033
15034         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
15035         if (ret) {
15036                 DRM_ERROR("framebuffer init failed %d\n", ret);
15037                 return ret;
15038         }
15039
15040         intel_fb->obj->framebuffer_references++;
15041
15042         return 0;
15043 }
15044
15045 static struct drm_framebuffer *
15046 intel_user_framebuffer_create(struct drm_device *dev,
15047                               struct drm_file *filp,
15048                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
15049 {
15050         struct drm_framebuffer *fb;
15051         struct drm_i915_gem_object *obj;
15052         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15053
15054         obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
15055         if (&obj->base == NULL)
15056                 return ERR_PTR(-ENOENT);
15057
15058         fb = intel_framebuffer_create(dev, &mode_cmd, obj);
15059         if (IS_ERR(fb))
15060                 drm_gem_object_unreference_unlocked(&obj->base);
15061
15062         return fb;
15063 }
15064
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No-op stand-in used when fbdev emulation is compiled out. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
15070
/* Top-level mode config vtable: fb creation, hotplug and atomic hooks. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
15079
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Fills in dev_priv->display with the per-platform implementations of
 * pipe config readout, plane config readout, clock computation, CRTC
 * enable/disable, cdclk handling, FDI training and page flipping.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	/* CRTC modeset hooks, checked newest platform first. */
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev_priv))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	/* FDI link training hook (PCH platforms only). */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	/* cdclk reprogramming hooks (platforms with adjustable cdclk). */
	if (IS_BROADWELL(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			broadwell_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broadwell_modeset_calc_cdclk;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			bxt_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			bxt_modeset_calc_cdclk;
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			skl_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			skl_modeset_calc_cdclk;
	}

	/* CS-based page flip implementation, per generation. */
	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}
15271
15272 /*
15273  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15274  * resume, or other times.  This quirk makes sure that's the case for
15275  * affected systems.
15276  */
15277 static void quirk_pipea_force(struct drm_device *dev)
15278 {
15279         struct drm_i915_private *dev_priv = to_i915(dev);
15280
15281         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15282         DRM_INFO("applying pipe a force quirk\n");
15283 }
15284
15285 static void quirk_pipeb_force(struct drm_device *dev)
15286 {
15287         struct drm_i915_private *dev_priv = to_i915(dev);
15288
15289         dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15290         DRM_INFO("applying pipe b force quirk\n");
15291 }
15292
15293 /*
15294  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15295  */
15296 static void quirk_ssc_force_disable(struct drm_device *dev)
15297 {
15298         struct drm_i915_private *dev_priv = to_i915(dev);
15299         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15300         DRM_INFO("applying lvds SSC disable quirk\n");
15301 }
15302
15303 /*
15304  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15305  * brightness value
15306  */
15307 static void quirk_invert_brightness(struct drm_device *dev)
15308 {
15309         struct drm_i915_private *dev_priv = to_i915(dev);
15310         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15311         DRM_INFO("applying inverted panel brightness quirk\n");
15312 }
15313
15314 /* Some VBT's incorrectly indicate no backlight is present */
15315 static void quirk_backlight_present(struct drm_device *dev)
15316 {
15317         struct drm_i915_private *dev_priv = to_i915(dev);
15318         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15319         DRM_INFO("applying backlight present quirk\n");
15320 }
15321
/* One PCI-ID-matched quirk entry; matched against dev->pdev in intel_init_quirks(). */
struct intel_quirk {
	int device;		/* PCI device ID to match */
	int subsystem_vendor;	/* PCI subsystem vendor ID, or PCI_ANY_ID for wildcard */
	int subsystem_device;	/* PCI subsystem device ID, or PCI_ANY_ID for wildcard */
	void (*hook)(struct drm_device *dev);	/* applied when all IDs match */
};
15328
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied when the DMI list matches */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated DMI match table */
};
15334
15335 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15336 {
15337         DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15338         return 1;
15339 }
15340
/* DMI-matched quirks, applied by intel_init_quirks() after the PCI-ID table. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		/* NCR Corporation systems: backlight brightness is inverted. */
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
15356
/*
 * PCI-ID-matched quirk table. Every entry whose device/subsystem IDs match
 * the probed GPU gets its hook called, so one device may pick up several
 * quirks (e.g. 830 below appears twice, once per forced pipe).
 */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
15418
15419 static void intel_init_quirks(struct drm_device *dev)
15420 {
15421         struct pci_dev *d = dev->pdev;
15422         int i;
15423
15424         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15425                 struct intel_quirk *q = &intel_quirks[i];
15426
15427                 if (d->device == q->device &&
15428                     (d->subsystem_vendor == q->subsystem_vendor ||
15429                      q->subsystem_vendor == PCI_ANY_ID) &&
15430                     (d->subsystem_device == q->subsystem_device ||
15431                      q->subsystem_device == PCI_ANY_ID))
15432                         q->hook(dev);
15433         }
15434         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15435                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15436                         intel_dmi_quirks[i].hook(dev);
15437         }
15438 }
15439
15440 /* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Read-modify-write legacy VGA sequencer register SR01 via the
	 * index/data I/O port pair; bit 5 is set before disabling the
	 * plane (presumably the screen-off bit — see the W/A above). */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	/* Now turn the VGA display plane off and flush the write. */
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15458
/* Hardware-touching part of modeset init; also called on resume paths. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev);

	/* Seed the atomic cdclk bookkeeping with the freshly read HW value. */
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev_priv);
}
15470
15471 /*
15472  * Calculate what we think the watermarks should be for the state we've read
15473  * out of the hardware and then immediately program those watermarks so that
15474  * we ensure the hardware settings match our internal state.
15475  *
15476  * We can calculate what we think WM's should be by creating a duplicate of the
15477  * current state (which was constructed during hardware readout) and running it
15478  * through the atomic check code to calculate new watermark values in the
15479  * state object.
15480  */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* Standard w/w locking dance: back off and retry on deadlock. */
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	/* Duplicate the readout state so the check phase can compute WMs. */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto fail;
	}

	/* Write calculated watermark values back */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(cs);
	}

	drm_atomic_state_free(state);
fail:
	/* Fall through on success too: always drop locks and tear down ctx. */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15550
/*
 * One-time driver-load modeset initialization: mode_config setup, quirks,
 * per-pipe crtc/plane creation, output probing and initial hardware state
 * readout. Call order below is significant.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Display-less chips: nothing further to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions scale with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits also depend on the platform. */
	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a crtc per pipe plus all its sprite planes; sprite init
	 * failure is logged but not fatal. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}
15680
15681 static void intel_enable_pipe_a(struct drm_device *dev)
15682 {
15683         struct intel_connector *connector;
15684         struct drm_connector *crt = NULL;
15685         struct intel_load_detect_pipe load_detect_temp;
15686         struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15687
15688         /* We can't just switch on the pipe A, we need to set things up with a
15689          * proper mode and output configuration. As a gross hack, enable pipe A
15690          * by enabling the load detect pipe once. */
15691         for_each_intel_connector(dev, connector) {
15692                 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15693                         crt = &connector->base;
15694                         break;
15695                 }
15696         }
15697
15698         if (!crt)
15699                 return;
15700
15701         if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15702                 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15703 }
15704
/*
 * Verify the plane -> pipe routing for this crtc (pre-gen4 hardware has a
 * programmable mapping). Returns false when the *other* display plane is
 * enabled and selects this crtc's pipe, i.e. the mapping is crossed.
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;

	/* With a single pipe there is nothing to cross-route. */
	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	/* Read the control register of the other plane, not our own. */
	val = I915_READ(DSPCNTR(!crtc->plane));

	/* !! collapses the pipe-select field to 0/1, which only works
	 * because this path covers two-pipe hardware (see check above). */
	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}
15723
15724 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15725 {
15726         struct drm_device *dev = crtc->base.dev;
15727         struct intel_encoder *encoder;
15728
15729         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15730                 return true;
15731
15732         return false;
15733 }
15734
15735 static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15736 {
15737         struct drm_device *dev = encoder->base.dev;
15738         struct intel_connector *connector;
15739
15740         for_each_connector_on_encoder(dev, &encoder->base, connector)
15741                 return true;
15742
15743         return false;
15744 }
15745
/*
 * Fix up crtc hardware state left behind by the BIOS (or a previous driver)
 * so it is consistent with our software tracking. Steps are order-dependent:
 * plane mapping must be sanitized before the encoder/connector checks.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		/* NOTE(review): enum plane is stored in a bool; works only
		 * because pre-gen4 has just planes A/B (values 0/1). */
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15827
/*
 * Fix up an encoder whose hardware state is inconsistent with its links:
 * an encoder with active connectors but no active pipe is forcibly
 * disabled and its connectors clamped to DPMS off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			/* post_disable is optional for an encoder. */
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15871
15872 void i915_redisable_vga_power_on(struct drm_device *dev)
15873 {
15874         struct drm_i915_private *dev_priv = to_i915(dev);
15875         i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15876
15877         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15878                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15879                 i915_disable_vga(dev);
15880         }
15881 }
15882
/* Power-domain-aware wrapper around i915_redisable_vga_power_on(). */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	/* Balance the power reference taken above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15901
15902 static bool primary_get_hw_state(struct intel_plane *plane)
15903 {
15904         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15905
15906         return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15907 }
15908
15909 /* FIXME read out full plane state for all planes */
15910 static void readout_plane_state(struct intel_crtc *crtc)
15911 {
15912         struct drm_plane *primary = crtc->base.primary;
15913         struct intel_plane_state *plane_state =
15914                 to_intel_plane_state(primary->state);
15915
15916         plane_state->visible = crtc->active &&
15917                 primary_get_hw_state(to_intel_plane(primary));
15918
15919         if (plane_state->visible)
15920                 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15921 }
15922
15923 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15924 {
15925         struct drm_i915_private *dev_priv = to_i915(dev);
15926         enum pipe pipe;
15927         struct intel_crtc *crtc;
15928         struct intel_encoder *encoder;
15929         struct intel_connector *connector;
15930         int i;
15931
15932         dev_priv->active_crtcs = 0;
15933
15934         for_each_intel_crtc(dev, crtc) {
15935                 struct intel_crtc_state *crtc_state = crtc->config;
15936                 int pixclk = 0;
15937
15938                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
15939                 memset(crtc_state, 0, sizeof(*crtc_state));
15940                 crtc_state->base.crtc = &crtc->base;
15941
15942                 crtc_state->base.active = crtc_state->base.enable =
15943                         dev_priv->display.get_pipe_config(crtc, crtc_state);
15944
15945                 crtc->base.enabled = crtc_state->base.enable;
15946                 crtc->active = crtc_state->base.active;
15947
15948                 if (crtc_state->base.active) {
15949                         dev_priv->active_crtcs |= 1 << crtc->pipe;
15950
15951                         if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
15952                                 pixclk = ilk_pipe_pixel_rate(crtc_state);
15953                         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15954                                 pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15955                         else
15956                                 WARN_ON(dev_priv->display.modeset_calc_cdclk);
15957
15958                         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15959                         if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
15960                                 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15961                 }
15962
15963                 dev_priv->min_pixclk[crtc->pipe] = pixclk;
15964
15965                 readout_plane_state(crtc);
15966
15967                 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15968                               crtc->base.base.id, crtc->base.name,
15969                               crtc->active ? "enabled" : "disabled");
15970         }
15971
15972         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15973                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15974
15975                 pll->on = pll->funcs.get_hw_state(dev_priv, pll,
15976                                                   &pll->config.hw_state);
15977                 pll->config.crtc_mask = 0;
15978                 for_each_intel_crtc(dev, crtc) {
15979                         if (crtc->active && crtc->config->shared_dpll == pll)
15980                                 pll->config.crtc_mask |= 1 << crtc->pipe;
15981                 }
15982                 pll->active_mask = pll->config.crtc_mask;
15983
15984                 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15985                               pll->name, pll->config.crtc_mask, pll->on);
15986         }
15987
15988         for_each_intel_encoder(dev, encoder) {
15989                 pipe = 0;
15990
15991                 if (encoder->get_hw_state(encoder, &pipe)) {
15992                         crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15993                         encoder->base.crtc = &crtc->base;
15994                         crtc->config->output_types |= 1 << encoder->type;
15995                         encoder->get_config(encoder, crtc->config);
15996                 } else {
15997                         encoder->base.crtc = NULL;
15998                 }
15999
16000                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16001                               encoder->base.base.id,
16002                               encoder->base.name,
16003                               encoder->base.crtc ? "enabled" : "disabled",
16004                               pipe_name(pipe));
16005         }
16006
16007         for_each_intel_connector(dev, connector) {
16008                 if (connector->get_hw_state(connector)) {
16009                         connector->base.dpms = DRM_MODE_DPMS_ON;
16010
16011                         encoder = connector->encoder;
16012                         connector->base.encoder = &encoder->base;
16013
16014                         if (encoder->base.crtc &&
16015                             encoder->base.crtc->state->active) {
16016                                 /*
16017                                  * This has to be done during hardware readout
16018                                  * because anything calling .crtc_disable may
16019                                  * rely on the connector_mask being accurate.
16020                                  */
16021                                 encoder->base.crtc->state->connector_mask |=
16022                                         1 << drm_connector_index(&connector->base);
16023                                 encoder->base.crtc->state->encoder_mask |=
16024                                         1 << drm_encoder_index(&encoder->base);
16025                         }
16026
16027                 } else {
16028                         connector->base.dpms = DRM_MODE_DPMS_OFF;
16029                         connector->base.encoder = NULL;
16030                 }
16031                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16032                               connector->base.base.id,
16033                               connector->base.name,
16034                               connector->base.encoder ? "enabled" : "disabled");
16035         }
16036
16037         for_each_intel_crtc(dev, crtc) {
16038                 crtc->base.hwmode = crtc->config->base.adjusted_mode;
16039
16040                 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16041                 if (crtc->base.state->active) {
16042                         intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
16043                         intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
16044                         WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16045
16046                         /*
16047                          * The initial mode needs to be set in order to keep
16048                          * the atomic core happy. It wants a valid mode if the
16049                          * crtc's enabled, so we do the above call.
16050                          *
16051                          * At this point some state updated by the connectors
16052                          * in their ->detect() callback has not run yet, so
16053                          * no recalculation can be done yet.
16054                          *
16055                          * Even if we could do a recalculation and modeset
16056                          * right now it would cause a double modeset if
16057                          * fbdev or userspace chooses a different initial mode.
16058                          *
16059                          * If that happens, someone indicated they wanted a
16060                          * mode change, which means it's safe to do a full
16061                          * recalculation.
16062                          */
16063                         crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
16064
16065                         drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
16066                         update_scanline_offset(crtc);
16067                 }
16068
16069                 intel_pipe_config_sanity_check(dev_priv, crtc->config);
16070         }
16071 }
16072
/*
 * Scan out the current hw modeset state (what the BIOS or a previous
 * driver instance left enabled), and sanitize it to a state the driver
 * can take over from: conflicting or impossible configurations are shut
 * down, and the software state is made to match the hardware.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	/* Encoders must be sanitized before the crtcs that drive them. */
	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is running but unused by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out the platform-specific watermark hardware state. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	/*
	 * Grab the power domains each active crtc needs; any domain still
	 * flagged for release here indicates a bookkeeping bug (WARN).
	 */
	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	/* Init power is no longer needed once per-crtc domains are held. */
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}
16131
/*
 * Restore the display state after resume (or reset): re-read the hardware
 * state once, then replay the atomic state saved in
 * dev_priv->modeset_restore_state, retrying the whole locked sequence on
 * modeset-lock deadlock.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	/* hw-state readout must run only once, even across EDEADLK retries */
	bool setup = false;

	dev_priv->modeset_restore_state = NULL;

	/*
	 * This is a kludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);

	if (ret == 0 && !setup) {
		setup = true;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);
	}

	if (ret == 0 && state) {
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		state->acquire_ctx = &ctx;

		/* ignore any reset values/BIOS leftovers in the WM registers */
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore
			 * current state. With fast modeset this may not result
			 * in a modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		ret = drm_atomic_commit(state);
	}

	/* Lock contention: back off and retake every lock from scratch. */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	/* On failure the state was not consumed by the commit; free it here. */
	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}
16197
16198 void intel_modeset_gem_init(struct drm_device *dev)
16199 {
16200         struct drm_i915_private *dev_priv = to_i915(dev);
16201         struct drm_crtc *c;
16202         struct drm_i915_gem_object *obj;
16203         int ret;
16204
16205         intel_init_gt_powersave(dev_priv);
16206
16207         intel_modeset_init_hw(dev);
16208
16209         intel_setup_overlay(dev_priv);
16210
16211         /*
16212          * Make sure any fbs we allocated at startup are properly
16213          * pinned & fenced.  When we do the allocation it's too early
16214          * for this.
16215          */
16216         for_each_crtc(dev, c) {
16217                 obj = intel_fb_obj(c->primary->fb);
16218                 if (obj == NULL)
16219                         continue;
16220
16221                 mutex_lock(&dev->struct_mutex);
16222                 ret = intel_pin_and_fence_fb_obj(c->primary->fb,
16223                                                  c->primary->state->rotation);
16224                 mutex_unlock(&dev->struct_mutex);
16225                 if (ret) {
16226                         DRM_ERROR("failed to pin boot fb on pipe %d\n",
16227                                   to_intel_crtc(c)->pipe);
16228                         drm_framebuffer_unreference(c->primary->fb);
16229                         c->primary->fb = NULL;
16230                         c->primary->crtc = c->primary->state->crtc = NULL;
16231                         update_state_fb(c->primary);
16232                         c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16233                 }
16234         }
16235 }
16236
/*
 * Late registration work for a connector: register its backlight device.
 *
 * Returns 0 on success or the error from backlight registration. The
 * previous goto-err scaffold performed no cleanup and is dropped in
 * favour of returning the result directly.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	return intel_backlight_device_register(intel_connector);
}
16251
/*
 * Undo intel_connector_register(): tear down the backlight device and the
 * panel backlight state for @connector.
 */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
16259
/*
 * Tear down modeset state on driver unload. The ordering below is
 * deliberate (see the inline comments) — do not reorder.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	/* GMBUS last: connectors are gone, nothing can use the i2c buses now. */
	intel_teardown_gmbus(dev);
}
16294
16295 void intel_connector_attach_encoder(struct intel_connector *connector,
16296                                     struct intel_encoder *encoder)
16297 {
16298         connector->encoder = encoder;
16299         drm_mode_connector_attach_encoder(&connector->base,
16300                                           &encoder->base);
16301 }
16302
16303 /*
16304  * set vga decode state - true == enable VGA decode
16305  */
16306 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16307 {
16308         struct drm_i915_private *dev_priv = to_i915(dev);
16309         unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16310         u16 gmch_ctrl;
16311
16312         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16313                 DRM_ERROR("failed to read control word\n");
16314                 return -EIO;
16315         }
16316
16317         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16318                 return 0;
16319
16320         if (state)
16321                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16322         else
16323                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16324
16325         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16326                 DRM_ERROR("failed to write control word\n");
16327                 return -EIO;
16328         }
16329
16330         return 0;
16331 }
16332
/*
 * Snapshot of display register state captured at GPU error time, filled
 * by intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW/BDW only: HSW_PWR_WELL_DRIVER */
	u32 power_well_driver;

	/* number of valid entries in transcoder[] below */
	int num_transcoders;

	/* per-pipe cursor plane registers */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	/* per-pipe config/status; only valid when power_domain_on */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* per-pipe primary plane registers (subset depends on gen) */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* per-CPU-transcoder timing registers; entry valid when power_domain_on */
	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16376
16377 struct intel_display_error_state *
16378 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16379 {
16380         struct intel_display_error_state *error;
16381         int transcoders[] = {
16382                 TRANSCODER_A,
16383                 TRANSCODER_B,
16384                 TRANSCODER_C,
16385                 TRANSCODER_EDP,
16386         };
16387         int i;
16388
16389         if (INTEL_INFO(dev_priv)->num_pipes == 0)
16390                 return NULL;
16391
16392         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16393         if (error == NULL)
16394                 return NULL;
16395
16396         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16397                 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16398
16399         for_each_pipe(dev_priv, i) {
16400                 error->pipe[i].power_domain_on =
16401                         __intel_display_power_is_enabled(dev_priv,
16402                                                          POWER_DOMAIN_PIPE(i));
16403                 if (!error->pipe[i].power_domain_on)
16404                         continue;
16405
16406                 error->cursor[i].control = I915_READ(CURCNTR(i));
16407                 error->cursor[i].position = I915_READ(CURPOS(i));
16408                 error->cursor[i].base = I915_READ(CURBASE(i));
16409
16410                 error->plane[i].control = I915_READ(DSPCNTR(i));
16411                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16412                 if (INTEL_GEN(dev_priv) <= 3) {
16413                         error->plane[i].size = I915_READ(DSPSIZE(i));
16414                         error->plane[i].pos = I915_READ(DSPPOS(i));
16415                 }
16416                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16417                         error->plane[i].addr = I915_READ(DSPADDR(i));
16418                 if (INTEL_GEN(dev_priv) >= 4) {
16419                         error->plane[i].surface = I915_READ(DSPSURF(i));
16420                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16421                 }
16422
16423                 error->pipe[i].source = I915_READ(PIPESRC(i));
16424
16425                 if (HAS_GMCH_DISPLAY(dev_priv))
16426                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16427         }
16428
16429         /* Note: this does not include DSI transcoders. */
16430         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16431         if (HAS_DDI(dev_priv))
16432                 error->num_transcoders++; /* Account for eDP. */
16433
16434         for (i = 0; i < error->num_transcoders; i++) {
16435                 enum transcoder cpu_transcoder = transcoders[i];
16436
16437                 error->transcoder[i].power_domain_on =
16438                         __intel_display_power_is_enabled(dev_priv,
16439                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16440                 if (!error->transcoder[i].power_domain_on)
16441                         continue;
16442
16443                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16444
16445                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16446                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16447                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16448                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16449                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16450                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16451                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16452         }
16453
16454         return error;
16455 }
16456
/* Shorthand for appending formatted text to the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Dump a display error state snapshot (captured by
 * intel_display_capture_error_state()) into the error state buffer @m.
 * A NULL @error is a no-op, so callers need not check. Registers guarded
 * by a gen/platform check at capture time are guarded identically here.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}