Fix display underruns on Pineview with 2048x1280 VGA display.
[cascardo/linux.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/cpufreq.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include "drmP.h"
36 #include "intel_drv.h"
37 #include "i915_drm.h"
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include "drm_dp_helper.h"
41 #include "drm_crtc_helper.h"
42 #include <linux/dma_remapping.h>
43
44 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
46 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47 static void intel_update_watermarks(struct drm_device *dev);
48 static void intel_increase_pllclock(struct drm_crtc *crtc);
49 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
50
/* DPLL divider configuration: raw register fields plus derived clocks. */
typedef struct {
        /* given values */
        int n;
        int m1, m2;
        int p1, p2;
        /* derived values */
        int     dot;    /* dot (pixel) clock, in kHz */
        int     vco;    /* VCO frequency, in kHz */
        int     m;      /* effective M divider, computed from m1/m2 */
        int     p;      /* effective P divider, p1 * p2 */
} intel_clock_t;

/* Inclusive [min, max] range for a single PLL divider field. */
typedef struct {
        int     min, max;
} intel_range_t;

/* p2 selection: use p2_slow below dot_limit (kHz), p2_fast at or above. */
typedef struct {
        int     dot_limit;
        int     p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM                  2
typedef struct intel_limit intel_limit_t;
/* Per-platform/output divider limits plus the search routine to use. */
struct intel_limit {
        intel_range_t   dot, vco, n, m, m1, m2, p, p1;
        intel_p2_t          p2;
        bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
                        int, int, intel_clock_t *, intel_clock_t *);
};
80
81 /* FDI */
82 #define IRONLAKE_FDI_FREQ               2700000 /* in kHz for mode->clock */
83
84 static bool
85 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
86                     int target, int refclk, intel_clock_t *match_clock,
87                     intel_clock_t *best_clock);
88 static bool
89 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
90                         int target, int refclk, intel_clock_t *match_clock,
91                         intel_clock_t *best_clock);
92
93 static bool
94 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
95                       int target, int refclk, intel_clock_t *match_clock,
96                       intel_clock_t *best_clock);
97 static bool
98 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
99                            int target, int refclk, intel_clock_t *match_clock,
100                            intel_clock_t *best_clock);
101
102 static inline u32 /* units of 100MHz */
103 intel_fdi_link_freq(struct drm_device *dev)
104 {
105         if (IS_GEN5(dev)) {
106                 struct drm_i915_private *dev_priv = dev->dev_private;
107                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
108         } else
109                 return 27;
110 }
111
/* Gen2 (i8xx) divider limits for DVO/SDVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
        .find_pll = intel_find_best_PLL,
};

/* Gen2 (i8xx) divider limits for LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
};
139
/* Gen3/gen4 (i9xx) divider limits for SDVO (and other non-LVDS) outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
};

/* Gen3/gen4 (i9xx) divider limits for LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 10, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
        .find_pll = intel_find_best_PLL,
};
167
168
/* G4x divider limits for SDVO outputs (p2 is the same above and below dot_limit). */
static const intel_limit_t intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for HDMI and analog (CRT) outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for single-channel LVDS (dot_limit 0: p2 is fixed). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for dual-channel LVDS (dot_limit 0: p2 is fixed). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
        .find_pll = intel_g4x_find_best_PLL,
};

/* G4x divider limits for DisplayPort (fixed divider sets, see intel_find_pll_g4x_dp). */
static const intel_limit_t intel_limits_g4x_display_port = {
        .dot = { .min = 161670, .max = 227000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 97, .max = 108 },
        .m1 = { .min = 0x10, .max = 0x12 },
        .m2 = { .min = 0x05, .max = 0x06 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_g4x_dp,
};
242
/* Pineview divider limits for SDVO outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_find_best_PLL,
};

/* Pineview divider limits for LVDS panels (same m/n oddities as SDVO above). */
static const intel_limit_t intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_find_best_PLL,
};
272
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake single-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake dual-channel LVDS (120MHz refclk). */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake dual-channel LVDS, 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
        .find_pll = intel_g4x_find_best_PLL,
};

/* Ironlake DisplayPort (fixed divider sets, see intel_find_pll_ironlake_dp). */
static const intel_limit_t intel_limits_ironlake_display_port = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000},
        .n = { .min = 1, .max = 2 },
        .m = { .min = 81, .max = 90 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 10, .max = 20 },
        .p1 = { .min = 1, .max = 2},
        .p2 = { .dot_limit = 0,
                .p2_slow = 10, .p2_fast = 10 },
        .find_pll = intel_find_pll_ironlake_dp,
};
362
363 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
364                                                 int refclk)
365 {
366         struct drm_device *dev = crtc->dev;
367         struct drm_i915_private *dev_priv = dev->dev_private;
368         const intel_limit_t *limit;
369
370         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
371                 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
372                     LVDS_CLKB_POWER_UP) {
373                         /* LVDS dual channel */
374                         if (refclk == 100000)
375                                 limit = &intel_limits_ironlake_dual_lvds_100m;
376                         else
377                                 limit = &intel_limits_ironlake_dual_lvds;
378                 } else {
379                         if (refclk == 100000)
380                                 limit = &intel_limits_ironlake_single_lvds_100m;
381                         else
382                                 limit = &intel_limits_ironlake_single_lvds;
383                 }
384         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
385                         HAS_eDP)
386                 limit = &intel_limits_ironlake_display_port;
387         else
388                 limit = &intel_limits_ironlake_dac;
389
390         return limit;
391 }
392
/*
 * intel_g4x_limit - pick the PLL limit table for a G4x CRTC
 * @crtc: CRTC being configured; its output types select the table
 *
 * For LVDS, dual vs. single channel is read back from the clock-B
 * power bits of the LVDS register rather than configured by us.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const intel_limit_t *limit;

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        /* LVDS with dual channel */
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
                        /* LVDS with single channel */
                        limit = &intel_limits_g4x_single_channel_lvds;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
                   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
                limit = &intel_limits_g4x_hdmi;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
                limit = &intel_limits_g4x_display_port;
        } else /* The option is for other outputs */
                limit = &intel_limits_i9xx_sdvo;

        return limit;
}
419
420 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
421 {
422         struct drm_device *dev = crtc->dev;
423         const intel_limit_t *limit;
424
425         if (HAS_PCH_SPLIT(dev))
426                 limit = intel_ironlake_limit(crtc, refclk);
427         else if (IS_G4X(dev)) {
428                 limit = intel_g4x_limit(crtc);
429         } else if (IS_PINEVIEW(dev)) {
430                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
431                         limit = &intel_limits_pineview_lvds;
432                 else
433                         limit = &intel_limits_pineview_sdvo;
434         } else if (!IS_GEN2(dev)) {
435                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
436                         limit = &intel_limits_i9xx_lvds;
437                 else
438                         limit = &intel_limits_i9xx_sdvo;
439         } else {
440                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
441                         limit = &intel_limits_i8xx_lvds;
442                 else
443                         limit = &intel_limits_i8xx_dvo;
444         }
445         return limit;
446 }
447
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
        /* Single combined M divider; register encodes actual_value - 2. */
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        /* Unlike i9xx, n is used directly (no +2 bias). */
        clock->vco = refclk * clock->m / clock->n;
        clock->dot = clock->vco / clock->p;
}
456
/* Fill in the derived m/p/vco/dot fields from the raw divider values.
 * refclk is in kHz; vco and dot come out in kHz as well. */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
        if (IS_PINEVIEW(dev)) {
                pineview_clock(refclk, clock);
                return;
        }
        /* m1/m2/n register fields encode actual_value - 2. */
        clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
        clock->p = clock->p1 * clock->p2;
        clock->vco = refclk * clock->m / (clock->n + 2);
        clock->dot = clock->vco / clock->p;
}
468
/**
 * Returns whether any output on the specified pipe is of the specified type
 * @crtc: CRTC to inspect
 * @type: INTEL_OUTPUT_* encoder type to look for
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
        struct drm_device *dev = crtc->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        /* Walk every encoder on the device, matching those bound to @crtc. */
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->base.crtc == crtc && encoder->type == type)
                        return true;

        return false;
}
484
/* Bail out of intel_PLL_is_valid with false; debug message elided. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
                               const intel_limit_t *limit,
                               const intel_clock_t *clock)
{
        /* Range-check every raw and derived divider against the limit table. */
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
                INTELPllInvalid("p out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");
        /* Pineview has no m1 divider, so the m1 > m2 requirement is waived. */
        if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
                INTELPllInvalid("m1 <= m2\n");
        if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
                INTELPllInvalid("m out of range\n");
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
519
/*
 * intel_find_best_PLL - exhaustive divider search (gen2/3-style PLLs)
 * @limit: divider limits for this platform/output combination
 * @crtc: CRTC whose outputs determine the p2 (LVDS dual-channel) choice
 * @target: desired dot clock, in kHz
 * @refclk: reference clock, in kHz
 * @match_clock: optional; when set, only dividers with the same p are accepted
 * @best_clock: out parameter; filled with the closest valid match
 *
 * Returns true iff some valid divider set came closer to @target than the
 * initial error bound (err starts at @target itself).
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                    int target, int refclk, intel_clock_t *match_clock,
                    intel_clock_t *best_clock)

{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int err = target;       /* best |dot - target| seen so far */

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
            (I915_READ(LVDS)) != 0) {
                /*
                 * For LVDS, if the panel is on, just rely on its current
                 * settings for dual-channel.  We haven't figured out how to
                 * reliably set up different single/dual channel state, if we
                 * even can.
                 */
                if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        clock.p2 = limit->p2.p2_slow;
                else
                        clock.p2 = limit->p2.p2_fast;
        }

        memset(best_clock, 0, sizeof(*best_clock));

        /* Brute-force all m1/m2/n/p1 combinations within the limits. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* m1 is always 0 in Pineview */
                        if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        intel_clock(dev, refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
586
/*
 * intel_g4x_find_best_PLL - divider search tuned for G4x/Ironlake PLLs
 * @limit: divider limits for this platform/output combination
 * @crtc: CRTC whose outputs determine the p2 (LVDS dual-channel) choice
 * @target: desired dot clock, in kHz
 * @refclk: reference clock, in kHz
 * @match_clock: optional; when set, only dividers with the same p are accepted
 * @best_clock: out parameter; filled with the best match found
 *
 * Unlike intel_find_best_PLL, this accepts any result within ~0.585% of
 * @target and biases the search per hardware preference (small n, large
 * m1/m2).  Returns true if any acceptable divider set was found.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                        int target, int refclk, intel_clock_t *match_clock,
                        intel_clock_t *best_clock)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
        int max_n;
        bool found;
        /* approximately equals target * 0.00585 */
        int err_most = (target >> 8) + (target >> 9);
        found = false;

        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
                int lvds_reg;

                if (HAS_PCH_SPLIT(dev))
                        lvds_reg = PCH_LVDS;
                else
                        lvds_reg = LVDS;
                if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        clock.p2 = limit->p2.p2_slow;
                else
                        clock.p2 = limit->p2.p2_fast;
        }

        memset(best_clock, 0, sizeof(*best_clock));
        max_n = limit->n.max;
        /* based on hardware requirement, prefer smaller n to precision */
        for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
                /* based on hardware requirement, prefer larger m1,m2 */
                for (clock.m1 = limit->m1.max;
                     clock.m1 >= limit->m1.min; clock.m1--) {
                        for (clock.m2 = limit->m2.max;
                             clock.m2 >= limit->m2.min; clock.m2--) {
                                for (clock.p1 = limit->p1.max;
                                     clock.p1 >= limit->p1.min; clock.p1--) {
                                        int this_err;

                                        intel_clock(dev, refclk, &clock);
                                        if (!intel_PLL_is_valid(dev, limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err_most) {
                                                *best_clock = clock;
                                                err_most = this_err;
                                                /* Don't revisit larger n values. */
                                                max_n = clock.n;
                                                found = true;
                                        }
                                }
                        }
                }
        }
        return found;
}
654
655 static bool
656 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
657                            int target, int refclk, intel_clock_t *match_clock,
658                            intel_clock_t *best_clock)
659 {
660         struct drm_device *dev = crtc->dev;
661         intel_clock_t clock;
662
663         if (target < 200000) {
664                 clock.n = 1;
665                 clock.p1 = 2;
666                 clock.p2 = 10;
667                 clock.m1 = 12;
668                 clock.m2 = 9;
669         } else {
670                 clock.n = 2;
671                 clock.p1 = 1;
672                 clock.p2 = 10;
673                 clock.m1 = 14;
674                 clock.m2 = 8;
675         }
676         intel_clock(dev, refclk, &clock);
677         memcpy(best_clock, &clock, sizeof(intel_clock_t));
678         return true;
679 }
680
681 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
682 static bool
683 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
684                       int target, int refclk, intel_clock_t *match_clock,
685                       intel_clock_t *best_clock)
686 {
687         intel_clock_t clock;
688         if (target < 200000) {
689                 clock.p1 = 2;
690                 clock.p2 = 10;
691                 clock.n = 2;
692                 clock.m1 = 23;
693                 clock.m2 = 8;
694         } else {
695                 clock.p1 = 1;
696                 clock.p2 = 10;
697                 clock.n = 1;
698                 clock.m1 = 14;
699                 clock.m2 = 2;
700         }
701         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
702         clock.p = (clock.p1 * clock.p2);
703         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
704         clock.vco = 0;
705         memcpy(best_clock, &clock, sizeof(intel_clock_t));
706         return true;
707 }
708
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipestat_reg = PIPESTAT(pipe);
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        /* One frame period in ms; fall back to 50ms if vrefresh is unknown. */
        int timeout = crtc->hwmode.vrefresh ?
                DIV_ROUND_UP(1000, crtc->hwmode.vrefresh) : 50;

        /* Clear existing vblank status. Note this will clear any other
         * sticky status fields as well.
         *
         * This races with i915_driver_irq_handler() with the result
         * that either function could miss a vblank event.  Here it is not
         * fatal, as we will either wait upon the next vblank interrupt or
         * timeout.  Generally speaking intel_wait_for_vblank() is only
         * called during modeset at which time the GPU should be idle and
         * should *not* be performing page flips and thus not waiting on
         * vblanks...
         * Currently, the result of us stealing a vblank from the irq
         * handler is that a single frame will be skipped during swapbuffers.
         */
        I915_WRITE(pipestat_reg,
                   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

        /* Wait for vblank interrupt bit to set */
        if (wait_for(I915_READ(pipestat_reg) &
                     PIPE_VBLANK_INTERRUPT_STATUS,
                     timeout))
                DRM_DEBUG_KMS("vblank wait timed out\n");
}
747
748 /*
749  * intel_wait_for_pipe_off - wait for pipe to turn off
750  * @dev: drm device
751  * @pipe: pipe to wait for
752  *
753  * After disabling a pipe, we can't wait for vblank in the usual way,
754  * spinning on the vblank interrupt status bit, since we won't actually
755  * see an interrupt when the pipe is disabled.
756  *
757  * On Gen4 and above:
758  *   wait for the pipe register state bit to turn off
759  *
760  * Otherwise:
761  *   wait for the display line value to settle (it usually
762  *   ends up stopping at the start of the next frame).
763  *
764  */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen >= 4) {
                int reg = PIPECONF(pipe);

                /* Wait for the Pipe State to go off */
                if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
                             100))
                        DRM_DEBUG_KMS("pipe_off wait timed out\n");
        } else {
                u32 last_line;
                int reg = PIPEDSL(pipe);
                unsigned long timeout = jiffies + msecs_to_jiffies(100);

                /* Pre-gen4 has no pipe-state bit: poll the display scan
                 * line until it stops changing (pipe has shut down) or
                 * 100ms elapse. */
                do {
                        last_line = I915_READ(reg) & DSL_LINEMASK;
                        mdelay(5);
                } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
                         time_after(timeout, jiffies));
                if (time_after(jiffies, timeout))
                        DRM_DEBUG_KMS("pipe_off wait timed out\n");
        }
}
791
/* Human-readable form of an on/off state, for assertion messages. */
static const char *state_string(bool enabled)
{
        if (enabled)
                return "on";
        return "off";
}
796
797 /* Only for pre-ILK configs */
/* Warn if the DPLL for @pipe is not in the expected @state. */
static void assert_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        reg = DPLL(pipe);
        val = I915_READ(reg);
        /* The VCO enable bit reflects whether the PLL is running. */
        cur_state = !!(val & DPLL_VCO_ENABLE);
        WARN(cur_state != state,
             "PLL state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
814
815 /* For ILK+ */
/* Warn if the PCH DPLL feeding transcoder @pipe is not in @state.
 * On CPT the transcoder/PLL mapping is configurable, so first resolve
 * which PLL the transcoder is actually hooked up to. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
                           enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        if (HAS_PCH_CPT(dev_priv->dev)) {
                u32 pch_dpll;

                pch_dpll = I915_READ(PCH_DPLL_SEL);

                /* Make sure the selected PLL is enabled to the transcoder */
                WARN(!((pch_dpll >> (4 * pipe)) & 8),
                     "transcoder %d PLL not enabled\n", pipe);

                /* Convert the transcoder pipe number to a pll pipe number */
                pipe = (pch_dpll >> (4 * pipe)) & 1;
        }

        reg = PCH_DPLL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & DPLL_VCO_ENABLE);
        WARN(cur_state != state,
             "PCH PLL state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
845
/* Warn if the FDI transmitter for @pipe is not in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        reg = FDI_TX_CTL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & FDI_TX_ENABLE);
        WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
862
/* Warn if the FDI receiver for @pipe is not in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & FDI_RX_ENABLE);
        WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
879
/* Warn if the FDI TX PLL for @pipe is disabled (gen6+ only; on ILK the
 * FDI PLL is always on so there is nothing to check). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        int reg;
        u32 val;

        /* ILK FDI PLL is always enabled */
        if (dev_priv->info->gen == 5)
                return;

        reg = FDI_TX_CTL(pipe);
        val = I915_READ(reg);
        WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
894
/* Warn if the FDI RX PLL for @pipe is disabled. */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        int reg;
        u32 val;

        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
905
/* Warn if the panel-power write-protect is still engaged for the LVDS
 * panel driven by @pipe; locked registers would make subsequent PLL
 * writes silently fail. */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
                                  enum pipe pipe)
{
        int pp_reg, lvds_reg;
        u32 val;
        enum pipe panel_pipe = PIPE_A;
        bool locked = true;

        /* Register offsets differ between PCH-split and pre-ILK parts. */
        if (HAS_PCH_SPLIT(dev_priv->dev)) {
                pp_reg = PCH_PP_CONTROL;
                lvds_reg = PCH_LVDS;
        } else {
                pp_reg = PP_CONTROL;
                lvds_reg = LVDS;
        }

        /* Registers are writable when the panel is off or the unlock
         * key is present in the power control register. */
        val = I915_READ(pp_reg);
        if (!(val & PANEL_POWER_ON) ||
            ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
                locked = false;

        if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
                panel_pipe = PIPE_B;

        WARN(panel_pipe == pipe && locked,
             "panel assertion failure, pipe %c regs locked\n",
             pipe_name(pipe));
}
934
/* Warn if @pipe is not in the expected @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        /* if we need the pipe A quirk it must be always on */
        if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
                state = true;

        reg = PIPECONF(pipe);
        val = I915_READ(reg);
        cur_state = !!(val & PIPECONF_ENABLE);
        WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
             pipe_name(pipe), state_string(state), state_string(cur_state));
}
953
/* Warn if display @plane is not in the expected @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
                         enum plane plane, bool state)
{
        int reg;
        u32 val;
        bool cur_state;

        reg = DSPCNTR(plane);
        val = I915_READ(reg);
        cur_state = !!(val & DISPLAY_PLANE_ENABLE);
        WARN(cur_state != state,
             "plane %c assertion failure (expected %s, current %s)\n",
             plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
971
/* Warn if any display plane is still feeding @pipe. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
{
        int reg, i;
        u32 val;
        int cur_pipe;

        /* Planes are fixed to pipes on ILK+ */
        if (HAS_PCH_SPLIT(dev_priv->dev)) {
                reg = DSPCNTR(pipe);
                val = I915_READ(reg);
                WARN((val & DISPLAY_PLANE_ENABLE),
                     "plane %c assertion failure, should be disabled but not\n",
                     plane_name(pipe));
                return;
        }

        /* Need to check both planes against the pipe */
        for (i = 0; i < 2; i++) {
                reg = DSPCNTR(i);
                val = I915_READ(reg);
                /* Pre-ILK: each plane selects its source pipe in DSPCNTR. */
                cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;
                WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
                     "plane %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(i), pipe_name(pipe));
        }
}
1000
/* Warn if no PCH reference clock source is enabled in DREF_CONTROL. */
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
        u32 val;
        bool enabled;

        val = I915_READ(PCH_DREF_CONTROL);
        /* Any of the three source fields being non-zero means a refclk
         * is being generated. */
        enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
                            DREF_SUPERSPREAD_SOURCE_MASK));
        WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
1011
/* Warn if the PCH transcoder for @pipe is still enabled. */
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
                                       enum pipe pipe)
{
        int reg;
        u32 val;
        bool enabled;

        reg = TRANSCONF(pipe);
        val = I915_READ(reg);
        enabled = !!(val & TRANS_ENABLE);
        WARN(enabled,
             "transcoder assertion failed, should be off on pipe %c but is still active\n",
             pipe_name(pipe));
}
1026
1027 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1028                             enum pipe pipe, u32 port_sel, u32 val)
1029 {
1030         if ((val & DP_PORT_EN) == 0)
1031                 return false;
1032
1033         if (HAS_PCH_CPT(dev_priv->dev)) {
1034                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1035                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1036                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1037                         return false;
1038         } else {
1039                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1040                         return false;
1041         }
1042         return true;
1043 }
1044
1045 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1046                               enum pipe pipe, u32 val)
1047 {
1048         if ((val & PORT_ENABLE) == 0)
1049                 return false;
1050
1051         if (HAS_PCH_CPT(dev_priv->dev)) {
1052                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1053                         return false;
1054         } else {
1055                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1056                         return false;
1057         }
1058         return true;
1059 }
1060
1061 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1062                               enum pipe pipe, u32 val)
1063 {
1064         if ((val & LVDS_PORT_EN) == 0)
1065                 return false;
1066
1067         if (HAS_PCH_CPT(dev_priv->dev)) {
1068                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1069                         return false;
1070         } else {
1071                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1072                         return false;
1073         }
1074         return true;
1075 }
1076
1077 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1078                               enum pipe pipe, u32 val)
1079 {
1080         if ((val & ADPA_DAC_ENABLE) == 0)
1081                 return false;
1082         if (HAS_PCH_CPT(dev_priv->dev)) {
1083                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1084                         return false;
1085         } else {
1086                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1087                         return false;
1088         }
1089         return true;
1090 }
1091
/* Warn if the PCH DP port at @reg is still driving transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe, int reg, u32 port_sel)
{
        u32 val = I915_READ(reg);
        WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
}
1100
1101 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1102                                      enum pipe pipe, int reg)
1103 {
1104         u32 val = I915_READ(reg);
1105         WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1106              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1107              reg, pipe_name(pipe));
1108 }
1109
1110 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1111                                       enum pipe pipe)
1112 {
1113         int reg;
1114         u32 val;
1115
1116         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1117         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1118         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1119
1120         reg = PCH_ADPA;
1121         val = I915_READ(reg);
1122         WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1123              "PCH VGA enabled on transcoder %c, should be disabled\n",
1124              pipe_name(pipe));
1125
1126         reg = PCH_LVDS;
1127         val = I915_READ(reg);
1128         WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1129              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1130              pipe_name(pipe));
1131
1132         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1133         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1134         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1135 }
1136
1137 /**
1138  * intel_enable_pll - enable a PLL
1139  * @dev_priv: i915 private structure
1140  * @pipe: pipe PLL to enable
1141  *
1142  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1143  * make sure the PLL reg is writable first though, since the panel write
1144  * protect mechanism may be enabled.
1145  *
1146  * Note!  This is for pre-ILK only.
1147  */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        int reg;
        u32 val;

        /* No really, not for ILK+ */
        BUG_ON(dev_priv->info->gen >= 5);

        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
                assert_panel_unlocked(dev_priv, pipe);

        reg = DPLL(pipe);
        val = I915_READ(reg);
        val |= DPLL_VCO_ENABLE;

        /* We do this three times for luck */
        I915_WRITE(reg, val);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, val);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
        I915_WRITE(reg, val);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
}
1175
1176 /**
1177  * intel_disable_pll - disable a PLL
1178  * @dev_priv: i915 private structure
1179  * @pipe: pipe PLL to disable
1180  *
1181  * Disable the PLL for @pipe, making sure the pipe is off first.
1182  *
1183  * Note!  This is for pre-ILK only.
1184  */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        int reg;
        u32 val;

        /* Don't disable pipe A or pipe A PLLs if needed */
        if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
                return;

        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);

        reg = DPLL(pipe);
        val = I915_READ(reg);
        val &= ~DPLL_VCO_ENABLE;
        I915_WRITE(reg, val);
        /* Posting read flushes the write before we return. */
        POSTING_READ(reg);
}
1203
1204 /**
1205  * intel_enable_pch_pll - enable PCH PLL
1206  * @dev_priv: i915 private structure
1207  * @pipe: pipe PLL to enable
1208  *
1209  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1210  * drives the transcoder clock.
1211  */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
                                 enum pipe pipe)
{
        int reg;
        u32 val;

        /* Only two PCH PLLs exist; higher pipes have nothing to enable. */
        if (pipe > 1)
                return;

        /* PCH only available on ILK+ */
        BUG_ON(dev_priv->info->gen < 5);

        /* PCH refclock must be enabled first */
        assert_pch_refclk_enabled(dev_priv);

        reg = PCH_DPLL(pipe);
        val = I915_READ(reg);
        val |= DPLL_VCO_ENABLE;
        I915_WRITE(reg, val);
        POSTING_READ(reg);
        udelay(200); /* PLL lock / warmup time */
}
1234
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
                                  enum pipe pipe)
{
        int reg;
        u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
                pll_sel = TRANSC_DPLL_ENABLE;

        /* Only two PCH PLLs exist; higher pipes have nothing to disable. */
        if (pipe > 1)
                return;

        /* PCH only available on ILK+ */
        BUG_ON(dev_priv->info->gen < 5);

        /* Make sure transcoder isn't still depending on us */
        assert_transcoder_disabled(dev_priv, pipe);

        if (pipe == 0)
                pll_sel |= TRANSC_DPLLA_SEL;
        else if (pipe == 1)
                pll_sel |= TRANSC_DPLLB_SEL;


        /* Don't turn the PLL off while transcoder C is still routed to it. */
        if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
                return;

        reg = PCH_DPLL(pipe);
        val = I915_READ(reg);
        val &= ~DPLL_VCO_ENABLE;
        I915_WRITE(reg, val);
        POSTING_READ(reg);
        udelay(200);
}
1267
1268 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1269                                     enum pipe pipe)
1270 {
1271         int reg;
1272         u32 val, pipeconf_val;
1273         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1274
1275         /* PCH only available on ILK+ */
1276         BUG_ON(dev_priv->info->gen < 5);
1277
1278         /* Make sure PCH DPLL is enabled */
1279         assert_pch_pll_enabled(dev_priv, pipe);
1280
1281         /* FDI must be feeding us bits for PCH ports */
1282         assert_fdi_tx_enabled(dev_priv, pipe);
1283         assert_fdi_rx_enabled(dev_priv, pipe);
1284
1285         reg = TRANSCONF(pipe);
1286         val = I915_READ(reg);
1287         pipeconf_val = I915_READ(PIPECONF(pipe));
1288
1289         if (HAS_PCH_IBX(dev_priv->dev)) {
1290                 /*
1291                  * make the BPC in transcoder be consistent with
1292                  * that in pipeconf reg.
1293                  */
1294                 val &= ~PIPE_BPC_MASK;
1295                 val |= pipeconf_val & PIPE_BPC_MASK;
1296         }
1297
1298         val &= ~TRANS_INTERLACE_MASK;
1299         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1300                 if (HAS_PCH_IBX(dev_priv->dev) &&
1301                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1302                         val |= TRANS_LEGACY_INTERLACED_ILK;
1303                 else
1304                         val |= TRANS_INTERLACED;
1305         else
1306                 val |= TRANS_PROGRESSIVE;
1307
1308         I915_WRITE(reg, val | TRANS_ENABLE);
1309         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1310                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1311 }
1312
/* Disable the PCH transcoder for @pipe after checking that FDI and all
 * PCH ports have already been shut down. */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        int reg;
        u32 val;

        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

        /* Ports must be off as well */
        assert_pch_ports_disabled(dev_priv, pipe);

        reg = TRANSCONF(pipe);
        val = I915_READ(reg);
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
        if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
                DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
1334
1335 /**
1336  * intel_enable_pipe - enable a pipe, asserting requirements
1337  * @dev_priv: i915 private structure
1338  * @pipe: pipe to enable
1339  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1340  *
1341  * Enable @pipe, making sure that various hardware specific requirements
1342  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1343  *
1344  * @pipe should be %PIPE_A or %PIPE_B.
1345  *
1346  * Will wait until the pipe is actually running (i.e. first vblank) before
1347  * returning.
1348  */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
                              bool pch_port)
{
        int reg;
        u32 val;

        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
         * need the check.
         */
        if (!HAS_PCH_SPLIT(dev_priv->dev))
                assert_pll_enabled(dev_priv, pipe);
        else {
                if (pch_port) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv, pipe);
                        assert_fdi_tx_pll_enabled(dev_priv, pipe);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }

        reg = PIPECONF(pipe);
        val = I915_READ(reg);
        /* Already enabled: nothing to do. */
        if (val & PIPECONF_ENABLE)
                return;

        I915_WRITE(reg, val | PIPECONF_ENABLE);
        /* Wait for the first vblank so the pipe is actually running. */
        intel_wait_for_vblank(dev_priv->dev, pipe);
}
1379
1380 /**
1381  * intel_disable_pipe - disable a pipe, asserting requirements
1382  * @dev_priv: i915 private structure
1383  * @pipe: pipe to disable
1384  *
1385  * Disable @pipe, making sure that various hardware specific requirements
1386  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1387  *
1388  * @pipe should be %PIPE_A or %PIPE_B.
1389  *
1390  * Will wait until the pipe has shut down before returning.
1391  */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        int reg;
        u32 val;

        /*
         * Make sure planes won't keep trying to pump pixels to us,
         * or we might hang the display.
         */
        assert_planes_disabled(dev_priv, pipe);

        /* Don't disable pipe A or pipe A PLLs if needed */
        if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
                return;

        reg = PIPECONF(pipe);
        val = I915_READ(reg);
        /* Already disabled: nothing to do. */
        if ((val & PIPECONF_ENABLE) == 0)
                return;

        I915_WRITE(reg, val & ~PIPECONF_ENABLE);
        /* Wait until the hardware has actually stopped scanning out. */
        intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1416
1417 /*
1418  * Plane regs are double buffered, going from enabled->disabled needs a
1419  * trigger in order to latch.  The display address reg provides this.
1420  */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
                                      enum plane plane)
{
        /* Writing the address registers back to themselves triggers the
         * double-buffered plane state to latch on the next vblank. */
        I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
        I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
1427
1428 /**
1429  * intel_enable_plane - enable a display plane on a given pipe
1430  * @dev_priv: i915 private structure
1431  * @plane: plane to enable
1432  * @pipe: pipe being fed
1433  *
1434  * Enable @plane on @pipe, making sure that @pipe is running first.
1435  */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
                               enum plane plane, enum pipe pipe)
{
        int reg;
        u32 val;

        /* If the pipe isn't enabled, we can't pump pixels and may hang */
        assert_pipe_enabled(dev_priv, pipe);

        reg = DSPCNTR(plane);
        val = I915_READ(reg);
        /* Already enabled: nothing to do. */
        if (val & DISPLAY_PLANE_ENABLE)
                return;

        I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
        /* Latch the double-buffered state and wait for it to take effect. */
        intel_flush_display_plane(dev_priv, plane);
        intel_wait_for_vblank(dev_priv->dev, pipe);
}
1454
1455 /**
1456  * intel_disable_plane - disable a display plane
1457  * @dev_priv: i915 private structure
1458  * @plane: plane to disable
1459  * @pipe: pipe consuming the data
1460  *
1461  * Disable @plane; should be an independent operation.
1462  */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
                                enum plane plane, enum pipe pipe)
{
        int reg;
        u32 val;

        reg = DSPCNTR(plane);
        val = I915_READ(reg);
        /* Already disabled: nothing to do. */
        if ((val & DISPLAY_PLANE_ENABLE) == 0)
                return;

        I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
        /* Latch the double-buffered disable and wait for it to take effect. */
        intel_flush_display_plane(dev_priv, plane);
        intel_wait_for_vblank(dev_priv->dev, pipe);
}
1478
/* Turn off the PCH DP port at @reg if it is currently driving @pipe. */
static void disable_pch_dp(struct drm_i915_private *dev_priv,
                           enum pipe pipe, int reg, u32 port_sel)
{
        u32 val = I915_READ(reg);
        if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
                DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
                I915_WRITE(reg, val & ~DP_PORT_EN);
        }
}
1488
1489 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1490                              enum pipe pipe, int reg)
1491 {
1492         u32 val = I915_READ(reg);
1493         if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1494                 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1495                               reg, pipe);
1496                 I915_WRITE(reg, val & ~PORT_ENABLE);
1497         }
1498 }
1499
1500 /* Disable any ports connected to this transcoder */
1501 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1502                                     enum pipe pipe)
1503 {
1504         u32 reg, val;
1505
1506         val = I915_READ(PCH_PP_CONTROL);
1507         I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1508
1509         disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1510         disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1511         disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1512
1513         reg = PCH_ADPA;
1514         val = I915_READ(reg);
1515         if (adpa_pipe_enabled(dev_priv, val, pipe))
1516                 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1517
1518         reg = PCH_LVDS;
1519         val = I915_READ(reg);
1520         if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1521                 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1522                 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1523                 POSTING_READ(reg);
1524                 udelay(100);
1525         }
1526
1527         disable_pch_hdmi(dev_priv, pipe, HDMIB);
1528         disable_pch_hdmi(dev_priv, pipe, HDMIC);
1529         disable_pch_hdmi(dev_priv, pipe, HDMID);
1530 }
1531
/* Disable 8xx-style framebuffer compression and wait for it to idle. */
static void i8xx_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }

        DRM_DEBUG_KMS("disabled FBC\n");
}
1553
/* Enable 8xx-style framebuffer compression for the framebuffer currently
 * bound to @crtc; @interval is the periodic recompression interval. */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;

        /* Compressed-buffer pitch is bounded by both the CFB size and the
         * framebuffer's own pitch. */
        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];

        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
        plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);

        /* Set it up... */
        fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
        fbc_ctl2 |= plane;
        I915_WRITE(FBC_CONTROL2, fbc_ctl2);
        I915_WRITE(FBC_FENCE_OFF, crtc->y);

        /* enable it... */
        fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
                      cfb_pitch, crtc->y, intel_crtc->plane);
}
1596
1597 static bool i8xx_fbc_enabled(struct drm_device *dev)
1598 {
1599         struct drm_i915_private *dev_priv = dev->dev_private;
1600
1601         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1602 }
1603
/* Enable G4x-style (DPFC) framebuffer compression for the framebuffer
 * currently bound to @crtc; @interval is the recompression timer count. */
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
        /* Tie compression to the GEM object's fence register. */
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
        I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

        DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1630
1631 static void g4x_disable_fbc(struct drm_device *dev)
1632 {
1633         struct drm_i915_private *dev_priv = dev->dev_private;
1634         u32 dpfc_ctl;
1635
1636         /* Disable compression */
1637         dpfc_ctl = I915_READ(DPFC_CONTROL);
1638         if (dpfc_ctl & DPFC_CTL_EN) {
1639                 dpfc_ctl &= ~DPFC_CTL_EN;
1640                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1641
1642                 DRM_DEBUG_KMS("disabled FBC\n");
1643         }
1644 }
1645
1646 static bool g4x_fbc_enabled(struct drm_device *dev)
1647 {
1648         struct drm_i915_private *dev_priv = dev->dev_private;
1649
1650         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1651 }
1652
/*
 * Make the SNB blitter notify FBC of front-buffer writes by toggling the
 * FBC_NOTIFY bit in GEN6_BLITTER_ECOSKPD.  The register appears to be
 * write-locked: each update is preceded by writing the payload bit shifted
 * into the lock field (NOTE(review): presumably a mask/unlock protocol —
 * confirm against the PRM).  Requires forcewake around the accesses.
 */
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	/* Unlock the NOTIFY bit, then set it... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...then relock by clearing the lock-field copy. */
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1672
/*
 * Enable frame buffer compression on Ironlake+ for the plane scanning out
 * @crtc->fb.  @interval is programmed into the recompression timer.  On
 * gen6 (Sandybridge) a CPU fence and blitter notification are additionally
 * required so CPU/blitter writes invalidate the compressed buffer.
 */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Preserve only the reserved bits of the current control value. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	/* Point the hardware at the render target in the GTT. */
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* SNB: enable the CPU fence and tell the blitter to
		 * notify FBC of front-buffer writes. */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1710
1711 static void ironlake_disable_fbc(struct drm_device *dev)
1712 {
1713         struct drm_i915_private *dev_priv = dev->dev_private;
1714         u32 dpfc_ctl;
1715
1716         /* Disable compression */
1717         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1718         if (dpfc_ctl & DPFC_CTL_EN) {
1719                 dpfc_ctl &= ~DPFC_CTL_EN;
1720                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1721
1722                 DRM_DEBUG_KMS("disabled FBC\n");
1723         }
1724 }
1725
1726 static bool ironlake_fbc_enabled(struct drm_device *dev)
1727 {
1728         struct drm_i915_private *dev_priv = dev->dev_private;
1729
1730         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1731 }
1732
1733 bool intel_fbc_enabled(struct drm_device *dev)
1734 {
1735         struct drm_i915_private *dev_priv = dev->dev_private;
1736
1737         if (!dev_priv->display.fbc_enabled)
1738                 return false;
1739
1740         return dev_priv->display.fbc_enabled(dev);
1741 }
1742
/*
 * Deferred-work callback that performs the actual FBC enable scheduled by
 * intel_enable_fbc().  Runs under struct_mutex; only acts if this work item
 * is still the one dev_priv->fbc_work points at (i.e. it was not cancelled
 * or superseded) and the crtc still scans out the framebuffer captured at
 * scheduling time.  Always frees the work item.
 */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			/* Record what we enabled so intel_update_fbc() can
			 * skip redundant re-enables. */
			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
1771
/*
 * Cancel a pending deferred FBC enable, if any.  Caller must hold
 * struct_mutex (see the synchronisation comment below).  If the delayed
 * work had not yet started we free it here; if it is already running,
 * clearing dev_priv->fbc_work makes intel_fbc_work_fn() a no-op and it
 * frees itself.
 */
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
1794
/*
 * Schedule FBC to be enabled on @crtc after a 50ms delay (see the comment
 * below for why the delay exists).  @interval is passed through to the
 * platform enable hook.  Any previously pending enable is cancelled first.
 * If the work item cannot be allocated we fall back to enabling FBC
 * synchronously rather than skipping it.
 */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Platform without FBC support registers no hook. */
	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL) {
		/* OOM: enable immediately instead of deferring. */
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
1834
/*
 * Disable FBC immediately and cancel any pending deferred enable.  The
 * pending work is cancelled even on platforms without a disable hook so a
 * stale enable cannot fire later.  cfb_plane is reset so the next
 * intel_update_fbc() does not mistake the state for unchanged.
 */
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}
1847
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	DRM_DEBUG_KMS("\n");

	if (!i915_powersave)
		return;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/* i915_enable_fbc < 0 means "per-chip default": currently off for
	 * gen <= 6, on for anything newer. */
	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	/* Worst case: no compression, so the compressed buffer must be able
	 * to hold the whole framebuffer. */
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
2015
/*
 * Pin @obj into the GTT for scanout and (for tiled buffers) install a
 * fence register.  @pipelined, when non-NULL, allows the pin/fence to be
 * pipelined behind that ring.  Interruptible waits are disabled for the
 * duration since a modeset must not be aborted half-way by a signal.
 *
 * Returns 0 on success or a negative error code; on failure nothing is
 * left pinned.  Undo with intel_unpin_fb_obj().
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Alignment requirements for the display surface depend on tiling
	 * and hardware generation. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2073
/*
 * Release a scanout pin taken by intel_pin_and_fence_fb_obj(): drop the
 * fence pin first, then the GTT pin (reverse acquisition order).
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin(obj);
}
2079
/*
 * Program the display plane registers on gen2-gen4 hardware to scan out
 * @fb with the pannable origin at (@x, @y).  Only planes 0 and 1 exist on
 * these chips.  Returns 0 on success or -EINVAL for an unknown plane or
 * unsupported pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	/* Tiling control only exists on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	/* Byte offset of (x, y) from the start of the framebuffer. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: base + tile offset registers; pre-gen4 folds the
		 * offset into a single address register. */
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return 0;
}
2152
/*
 * Program the display plane registers on Ironlake+ to scan out @fb with
 * the pannable origin at (@x, @y).  Plane 2 is additionally valid on
 * Ivybridge.  Stricter than the i9xx path: depth must match bpp exactly.
 * Returns 0 on success or -EINVAL for an unknown plane or unsupported
 * format.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		/* Only 565 is supported here, unlike the i9xx path. */
		if (fb->depth != 16)
			return -EINVAL;

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else
			return -EINVAL;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	/* Byte offset of (x, y) from the start of the framebuffer. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}
2230
2231 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2232 static int
2233 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2234                            int x, int y, enum mode_set_atomic state)
2235 {
2236         struct drm_device *dev = crtc->dev;
2237         struct drm_i915_private *dev_priv = dev->dev_private;
2238         int ret;
2239
2240         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2241         if (ret)
2242                 return ret;
2243
2244         intel_update_fbc(dev);
2245         intel_increase_pllclock(crtc);
2246
2247         return 0;
2248 }
2249
/*
 * Wait for all outstanding GPU and flip activity on @old_fb's object so it
 * is safe to unpin it.  First waits for pending page flips to complete (or
 * the GPU to be declared wedged), then forces any remaining rendering to
 * finish with interruptible waits temporarily disabled.
 *
 * Returns the result of i915_gem_object_finish_gpu(); failure should only
 * occur on a hung GPU, in which case the caller can safely continue.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Wait for pending flips on this object, or bail if wedged. */
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&dev_priv->mm.wedged) ||
		   atomic_read(&obj->pending_flip) == 0);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}
2276
/*
 * Modeset helper: make @crtc scan out its currently-bound fb at offset
 * (@x, @y).  Pins and fences the new fb, waits for the old fb (@old_fb,
 * may be NULL) to go idle, programs the plane, then after a vblank unpins
 * the old fb.  Also mirrors the pan offset into the legacy SAREA for DRI1
 * clients.  Returns 0 on success or a negative error code.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	/* Validate the plane index; plane 2 only exists on Ivybridge. */
	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	case 2:
		if (IS_IVYBRIDGE(dev))
			break;
		/* fall through otherwise */
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/* Let outstanding rendering/flips on the old fb drain before we
	 * switch away from it. */
	if (old_fb)
		intel_finish_fb(old_fb);

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		/* Undo the pin taken above before bailing out. */
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		/* Wait a vblank so the hardware has latched the new base
		 * before the old fb is unpinned. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!dev->primary->master)
		return 0;

	/* Keep the legacy SAREA pan coordinates in sync for DRI1. */
	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}
2351
/*
 * Select the eDP PLL frequency on Ironlake: 160MHz for link clocks below
 * 200000 kHz (with the documented magic-register workaround), 270MHz
 * otherwise.  Ends with a posting read and a 500us settle delay.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
2388
/*
 * Switch the FDI link out of training patterns into normal pixel-data
 * mode on both the CPU-side TX and PCH-side RX, with enhanced framing
 * enabled.  Bit layouts differ on Ivybridge (TX) and CPT PCHs (RX).
 * Called once link training has completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2429
2430 static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2431 {
2432         struct drm_i915_private *dev_priv = dev->dev_private;
2433         u32 flags = I915_READ(SOUTH_CHICKEN1);
2434
2435         flags |= FDI_PHASE_SYNC_OVR(pipe);
2436         I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2437         flags |= FDI_PHASE_SYNC_EN(pipe);
2438         I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2439         POSTING_READ(SOUTH_CHICKEN1);
2440 }
2441
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);	/* clear the lane-count field */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll the RX IIR for bit lock to confirm pattern 1 trained. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write-1-to-clear the sticky status bit */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock to confirm pattern 2 trained. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			/* write-1-to-clear the sticky status bit */
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2538
/*
 * Voltage-swing / pre-emphasis combinations tried in order during
 * SNB/IVB FDI link training until the receiver reports lock.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2545
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);	/* clear the lane-count field */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT has a dedicated train-pattern field */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Step through the voltage/pre-emphasis table until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write-1-to-clear the sticky status bit */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same voltage/pre-emphasis sweep, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				/* write-1-to-clear the sticky status bit */
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2677
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);	/* clear the lane-count field */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* disable auto-training, select manual pattern 1 */
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Step through the voltage/pre-emphasis table until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		/* re-read IIR once in case lock arrived just after the
		 * first read */
		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same sweep, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			/* write-1-to-clear the sticky status bit */
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2791
/*
 * ironlake_fdi_pll_enable - power up the FDI PLLs for @crtc's pipe
 * ahead of link training: PCH-side RX PLL first, then switch the RX
 * clock source, then the CPU-side TX PLL (left always-on on Ironlake).
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~((0x7 << 19) | (0x7 << 16));	/* lane count and bpc fields */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* mirror the pipe's bpc setting into the FDI RX bpc field */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2832
2833 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2834 {
2835         struct drm_i915_private *dev_priv = dev->dev_private;
2836         u32 flags = I915_READ(SOUTH_CHICKEN1);
2837
2838         flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2839         I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2840         flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2841         I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2842         POSTING_READ(SOUTH_CHICKEN1);
2843 }
2844 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2845 {
2846         struct drm_device *dev = crtc->dev;
2847         struct drm_i915_private *dev_priv = dev->dev_private;
2848         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2849         int pipe = intel_crtc->pipe;
2850         u32 reg, temp;
2851
2852         /* disable CPU FDI tx and PCH FDI rx */
2853         reg = FDI_TX_CTL(pipe);
2854         temp = I915_READ(reg);
2855         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2856         POSTING_READ(reg);
2857
2858         reg = FDI_RX_CTL(pipe);
2859         temp = I915_READ(reg);
2860         temp &= ~(0x7 << 16);
2861         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2862         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2863
2864         POSTING_READ(reg);
2865         udelay(100);
2866
2867         /* Ironlake workaround, disable clock pointer after downing FDI */
2868         if (HAS_PCH_IBX(dev)) {
2869                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2870                 I915_WRITE(FDI_RX_CHICKEN(pipe),
2871                            I915_READ(FDI_RX_CHICKEN(pipe) &
2872                                      ~FDI_RX_PHASE_SYNC_POINTER_EN));
2873         } else if (HAS_PCH_CPT(dev)) {
2874                 cpt_phase_pointer_disable(dev, pipe);
2875         }
2876
2877         /* still set train pattern 1 */
2878         reg = FDI_TX_CTL(pipe);
2879         temp = I915_READ(reg);
2880         temp &= ~FDI_LINK_TRAIN_NONE;
2881         temp |= FDI_LINK_TRAIN_PATTERN_1;
2882         I915_WRITE(reg, temp);
2883
2884         reg = FDI_RX_CTL(pipe);
2885         temp = I915_READ(reg);
2886         if (HAS_PCH_CPT(dev)) {
2887                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2888                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2889         } else {
2890                 temp &= ~FDI_LINK_TRAIN_NONE;
2891                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2892         }
2893         /* BPC in FDI rx is consistent with that in PIPECONF */
2894         temp &= ~(0x07 << 16);
2895         temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2896         I915_WRITE(reg, temp);
2897
2898         POSTING_READ(reg);
2899         udelay(100);
2900 }
2901
2902 /*
2903  * When we disable a pipe, we need to clear any pending scanline wait events
2904  * to avoid hanging the ring, which we assume we are waiting on.
2905  */
2906 static void intel_clear_scanline_wait(struct drm_device *dev)
2907 {
2908         struct drm_i915_private *dev_priv = dev->dev_private;
2909         struct intel_ring_buffer *ring;
2910         u32 tmp;
2911
2912         if (IS_GEN2(dev))
2913                 /* Can't break the hang on i8xx */
2914                 return;
2915
2916         ring = LP_RING(dev_priv);
2917         tmp = I915_READ_CTL(ring);
2918         if (tmp & RING_WAIT)
2919                 I915_WRITE_CTL(ring, tmp);
2920 }
2921
2922 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2923 {
2924         struct drm_i915_gem_object *obj;
2925         struct drm_i915_private *dev_priv;
2926
2927         if (crtc->fb == NULL)
2928                 return;
2929
2930         obj = to_intel_framebuffer(crtc->fb)->obj;
2931         dev_priv = crtc->dev->dev_private;
2932         wait_event(dev_priv->pending_flip_queue,
2933                    atomic_read(&obj->pending_flip) == 0);
2934 }
2935
2936 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2937 {
2938         struct drm_device *dev = crtc->dev;
2939         struct drm_mode_config *mode_config = &dev->mode_config;
2940         struct intel_encoder *encoder;
2941
2942         /*
2943          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2944          * must be driven by its own crtc; no sharing is possible.
2945          */
2946         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2947                 if (encoder->base.crtc != crtc)
2948                         continue;
2949
2950                 switch (encoder->type) {
2951                 case INTEL_OUTPUT_EDP:
2952                         if (!intel_encoder_is_pch_edp(&encoder->base))
2953                                 return false;
2954                         continue;
2955                 }
2956         }
2957
2958         return true;
2959 }
2960
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, transc_sel;

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Transcoder C has no dedicated PLL; it shares A or B. */
		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
			TRANSC_DPLLB_SEL;

		/* Be sure PCH DPLL SEL is set */
		temp = I915_READ(PCH_DPLL_SEL);
		if (pipe == 0) {
			temp &= ~(TRANSA_DPLLB_SEL);
			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
		} else if (pipe == 1) {
			temp &= ~(TRANSB_DPLLB_SEL);
			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
		} else if (pipe == 2) {
			temp &= ~(TRANSC_DPLLB_SEL);
			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	/* Copy the CPU pipe timings into the PCH transcoder registers. */
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port driving this crtc. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
3054
/*
 * intel_cpt_verify_modeset - verify the pipe started scanning out after
 * a modeset on a CougarPoint PCH.  If the scanline counter is stuck,
 * toggle the auto-train generation stall workaround and re-check.
 */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	/* a running pipe changes its scanline counter within 5ms */
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Without this, mode sets may fail silently on FDI */
		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
		udelay(250);
		I915_WRITE(tc2reg, 0);
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
	}
}
3072
/*
 * ironlake_crtc_enable - full power-up sequence for a crtc on
 * Ironlake-class (PCH split) hardware: FDI PLLs, panel fitter, LUT,
 * pipe, plane, and (for PCH ports) the PCH side.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	/* Nothing to do if the crtc is already running. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_cursor(crtc, true);
}
3132
/*
 * ironlake_crtc_disable - full power-down sequence for a crtc on
 * Ironlake-class hardware, in the reverse order of enable: plane, pipe,
 * panel fitter, FDI, PCH ports, transcoder, PLLs, then FDI clocks.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	/* Nothing to do if the crtc is already off. */
	if (!intel_crtc->active)
		return;

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* C shares PLL A or B */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			BUG(); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	if (!intel_crtc->no_pll)
		intel_disable_pch_pll(dev_priv, pipe);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	intel_clear_scanline_wait(dev);
	mutex_unlock(&dev->struct_mutex);
}
3231
3232 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3233 {
3234         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3235         int pipe = intel_crtc->pipe;
3236         int plane = intel_crtc->plane;
3237
3238         /* XXX: When our outputs are all unaware of DPMS modes other than off
3239          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3240          */
3241         switch (mode) {
3242         case DRM_MODE_DPMS_ON:
3243         case DRM_MODE_DPMS_STANDBY:
3244         case DRM_MODE_DPMS_SUSPEND:
3245                 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3246                 ironlake_crtc_enable(crtc);
3247                 break;
3248
3249         case DRM_MODE_DPMS_OFF:
3250                 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3251                 ironlake_crtc_disable(crtc);
3252                 break;
3253         }
3254 }
3255
3256 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3257 {
3258         if (!enable && intel_crtc->overlay) {
3259                 struct drm_device *dev = intel_crtc->base.dev;
3260                 struct drm_i915_private *dev_priv = dev->dev_private;
3261
3262                 mutex_lock(&dev->struct_mutex);
3263                 dev_priv->mm.interruptible = false;
3264                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3265                 dev_priv->mm.interruptible = true;
3266                 mutex_unlock(&dev->struct_mutex);
3267         }
3268
3269         /* Let userspace switch the overlay on again. In most cases userspace
3270          * has to recompute where to put it anyway.
3271          */
3272 }
3273
/* Power up a pipe on pre-PCH (i9xx) hardware. Idempotent. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	/* Update watermarks first so the FIFO allocation accounts for this
	 * pipe from the moment it starts scanning out. */
	intel_update_watermarks(dev);

	/* Enable from the clock source outwards: PLL, then pipe, then plane. */
	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
3299
/* Power down a pipe on pre-PCH (i9xx) hardware. Idempotent. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	/* Tear down FBC before its plane goes away. */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	/* Disable towards the clock source: plane, then pipe, then PLL. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	/* Recompute FBC and watermarks now that this pipe no longer
	 * contends for the display FIFO. */
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
	intel_clear_scanline_wait(dev);
}
3329
3330 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3331 {
3332         /* XXX: When our outputs are all unaware of DPMS modes other than off
3333          * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3334          */
3335         switch (mode) {
3336         case DRM_MODE_DPMS_ON:
3337         case DRM_MODE_DPMS_STANDBY:
3338         case DRM_MODE_DPMS_SUSPEND:
3339                 i9xx_crtc_enable(crtc);
3340                 break;
3341         case DRM_MODE_DPMS_OFF:
3342                 i9xx_crtc_disable(crtc);
3343                 break;
3344         }
3345 }
3346
3347 /**
3348  * Sets the power management mode of the pipe and plane.
3349  */
3350 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3351 {
3352         struct drm_device *dev = crtc->dev;
3353         struct drm_i915_private *dev_priv = dev->dev_private;
3354         struct drm_i915_master_private *master_priv;
3355         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3356         int pipe = intel_crtc->pipe;
3357         bool enabled;
3358
3359         if (intel_crtc->dpms_mode == mode)
3360                 return;
3361
3362         intel_crtc->dpms_mode = mode;
3363
3364         dev_priv->display.dpms(crtc, mode);
3365
3366         if (!dev->primary->master)
3367                 return;
3368
3369         master_priv = dev->primary->master->driver_priv;
3370         if (!master_priv->sarea_priv)
3371                 return;
3372
3373         enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3374
3375         switch (pipe) {
3376         case 0:
3377                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3378                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3379                 break;
3380         case 1:
3381                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3382                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3383                 break;
3384         default:
3385                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3386                 break;
3387         }
3388 }
3389
/* Fully shut down a crtc: flush outstanding rendering on its fb, run the
 * helper dpms-off path and finally release the display pin on the old
 * scanout buffer. */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	/* Flush any pending WAITs before we disable the pipe. Note that
	 * we need to drop the struct_mutex in order to acquire it again
	 * during the lowlevel dpms routines around a couple of the
	 * operations. It does not look trivial nor desirable to move
	 * that locking higher. So instead we leave a window for the
	 * submission of further commands on the fb before we can actually
	 * disable it. This race with userspace exists anyway, and we can
	 * only rely on the pipe being disabled by userspace after it
	 * receives the hotplug notification and has flushed any pending
	 * batches.
	 */
	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->fb);
		mutex_unlock(&dev->struct_mutex);
	}

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	/* Sanity-check that the dpms hook really shut everything down. */
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	/* The buffer is no longer being scanned out; drop its display pin. */
	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
3422
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will change)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* drm_crtc_helper "prepare" hook: simply power the crtc down. */
	i9xx_crtc_disable(crtc);
}

static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	/* drm_crtc_helper "commit" hook: power the crtc back up. */
	i9xx_crtc_enable(crtc);
}

static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	/* drm_crtc_helper "prepare" hook for PCH platforms. */
	ironlake_crtc_disable(crtc);
}

static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	/* drm_crtc_helper "commit" hook for PCH platforms. */
	ironlake_crtc_enable(crtc);
}
3450
/* drm_encoder_helper "prepare" hook: switch the encoder off around a
 * mode set. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
3457
/* drm_encoder_helper "commit" hook: switch the encoder back on after a
 * mode set. */
void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	/* On CougarPoint double-check the transcoder programming took. */
	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
3471
/* Common destroy callback for intel encoders: unregister from the DRM
 * core and free the containing intel_encoder allocation. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3479
3480 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3481                                   struct drm_display_mode *mode,
3482                                   struct drm_display_mode *adjusted_mode)
3483 {
3484         struct drm_device *dev = crtc->dev;
3485
3486         if (HAS_PCH_SPLIT(dev)) {
3487                 /* FDI link clock is fixed at 2.7G */
3488                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3489                         return false;
3490         }
3491
3492         /* All interlaced capable intel hw wants timings in frames. Note though
3493          * that intel_lvds_mode_fixup does some funny tricks with the crtc
3494          * timings, so we need to be careful not to clobber these.*/
3495         if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3496                 drm_mode_set_crtcinfo(adjusted_mode, 0);
3497
3498         return true;
3499 }
3500
/* Core display clock in kHz for 945-class hardware. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

/* Core display clock in kHz for 915-class hardware. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

/* Conservative core display clock in kHz for the remaining i9xx parts. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3515
3516 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3517 {
3518         u16 gcfgc = 0;
3519
3520         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3521
3522         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3523                 return 133000;
3524         else {
3525                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3526                 case GC_DISPLAY_CLOCK_333_MHZ:
3527                         return 333000;
3528                 default:
3529                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3530                         return 190000;
3531                 }
3532         }
3533 }
3534
/* Core display clock in kHz for 865-class hardware. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

/* Core display clock in kHz for 855-class hardware. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	/* NOTE(review): hpllcc is never read from the device, so the switch
	 * always sees 0; presumably a PCI config read of the HPLL clock
	 * control register was intended here -- confirm before relying on
	 * anything but the default case. */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

/* Core display clock in kHz for 830-class hardware. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3564
/* FDI link M/N ratio pair as computed by ironlake_compute_m_n(). */
struct fdi_m_n {
	u32        tu;     /* transfer unit size */
	u32        gmch_m; /* data M: bits_per_pixel * pixel_clock */
	u32        gmch_n; /* data N: link_clock * nlanes * 8 */
	u32        link_m; /* link M: pixel clock */
	u32        link_n; /* link N: link clock */
};
3572
/* Scale an M/N pair down until both terms fit in the 24-bit register
 * fields, preserving the ratio (modulo truncation). */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}
3581
/* Compute the FDI data and link M/N values for the given bandwidth.
 *
 * @bits_per_pixel: bits transported per pixel
 * @nlanes: number of FDI lanes
 * @pixel_clock: mode dotclock
 * @link_clock: FDI link clock (same units as @pixel_clock)
 * @m_n: result
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	/* data M/N: payload bits vs. total link capacity (8 bits/symbol). */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* link M/N: pixel clock vs. link clock. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3597
3598
/* Per-platform FIFO parameters consumed by intel_calculate_wm(). */
struct intel_watermark_params {
	unsigned long fifo_size;      /* FIFO size, in cachelines */
	unsigned long max_wm;         /* largest programmable watermark */
	unsigned long default_wm;     /* fallback when the calculation fails */
	unsigned long guard_size;     /* extra headroom, in cachelines */
	unsigned long cacheline_size; /* bytes per FIFO cacheline */
};
3606
/* Pineview has different values for various configs */
/* Initializer order matches struct intel_watermark_params:
 * fifo_size, max_wm, default_wm, guard_size, cacheline_size. */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
3685
/* Ironlake per-pipe and self-refresh watermark parameters.
 * Initializer order matches struct intel_watermark_params:
 * fifo_size, max_wm, default_wm, guard_size, cacheline_size. */
static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

/* Sandybridge equivalents of the above. */
static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
3743
3744
3745 /**
3746  * intel_calculate_wm - calculate watermark level
3747  * @clock_in_khz: pixel clock
3748  * @wm: chip FIFO params
3749  * @pixel_size: display pixel size
3750  * @latency_ns: memory latency for the platform
3751  *
3752  * Calculate the watermark level (the level at which the display plane will
3753  * start fetching from memory again).  Each chip has a different display
3754  * FIFO size and allocation, so the caller needs to figure that out and pass
3755  * in the correct intel_watermark_params structure.
3756  *
3757  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3758  * on the pixel size.  When it reaches the watermark level, it'll start
3759  * fetching FIFO line sized based chunks from memory until the FIFO fills
3760  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3761  * will occur, and a display engine hang could result.
3762  */
3763 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3764                                         const struct intel_watermark_params *wm,
3765                                         int fifo_size,
3766                                         int pixel_size,
3767                                         unsigned long latency_ns)
3768 {
3769         long entries_required, wm_size;
3770
3771         /*
3772          * Note: we need to make sure we don't overflow for various clock &
3773          * latency values.
3774          * clocks go from a few thousand to several hundred thousand.
3775          * latency is usually a few thousand
3776          */
3777         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3778                 1000;
3779         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3780
3781         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3782
3783         wm_size = fifo_size - (entries_required + wm->guard_size);
3784
3785         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3786
3787         /* Don't promote wm_size to unsigned... */
3788         if (wm_size > (long)wm->max_wm)
3789                 wm_size = wm->max_wm;
3790         if (wm_size <= 0)
3791                 wm_size = wm->default_wm;
3792         return wm_size;
3793 }
3794
/* Self-refresh latency table entry, keyed by chipset flavour, memory type
 * and FSB/memory frequency. The latencies are fed to intel_calculate_wm()
 * as its latency_ns argument, i.e. they are in nanoseconds. */
struct cxsr_latency {
	int is_desktop;
	int is_ddr3;
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long display_sr;
	unsigned long display_hpll_disable;
	unsigned long cursor_sr;
	unsigned long cursor_hpll_disable;
};
3805
/* Columns: is_desktop, is_ddr3, fsb_freq, mem_freq,
 * display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable. */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3843
3844 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3845                                                          int is_ddr3,
3846                                                          int fsb,
3847                                                          int mem)
3848 {
3849         const struct cxsr_latency *latency;
3850         int i;
3851
3852         if (fsb == 0 || mem == 0)
3853                 return NULL;
3854
3855         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3856                 latency = &cxsr_latency_table[i];
3857                 if (is_desktop == latency->is_desktop &&
3858                     is_ddr3 == latency->is_ddr3 &&
3859                     fsb == latency->fsb_freq && mem == latency->mem_freq)
3860                         return latency;
3861         }
3862
3863         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3864
3865         return NULL;
3866 }
3867
/* Turn off Pineview self-refresh (CxSR) by clearing its enable bit in
 * DSPFW3. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3875
3876 /*
3877  * Latency for FIFO fetches is dependent on several factors:
3878  *   - memory configuration (speed, channels)
3879  *   - chipset
3880  *   - current MCH state
3881  * It can be fairly high in some situations, so here we assume a fairly
3882  * pessimal value.  It's a tradeoff between extra memory fetches (if we
3883  * set this value too high, the FIFO will fetch frequently to stay full)
3884  * and power consumption (set it too low to save power and we might see
3885  * FIFO underruns and display "flicker").
3886  *
3887  * A value of 5us seems to be a good balance; safe for very low end
3888  * platforms but not overly aggressive on lower latency configs.
3889  */
3890 static const int latency_ns = 5000;
3891
3892 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3893 {
3894         struct drm_i915_private *dev_priv = dev->dev_private;
3895         uint32_t dsparb = I915_READ(DSPARB);
3896         int size;
3897
3898         size = dsparb & 0x7f;
3899         if (plane)
3900                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3901
3902         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3903                       plane ? "B" : "A", size);
3904
3905         return size;
3906 }
3907
3908 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3909 {
3910         struct drm_i915_private *dev_priv = dev->dev_private;
3911         uint32_t dsparb = I915_READ(DSPARB);
3912         int size;
3913
3914         size = dsparb & 0x1ff;
3915         if (plane)
3916                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3917         size >>= 1; /* Convert to cachelines */
3918
3919         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3920                       plane ? "B" : "A", size);
3921
3922         return size;
3923 }
3924
3925 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3926 {
3927         struct drm_i915_private *dev_priv = dev->dev_private;
3928         uint32_t dsparb = I915_READ(DSPARB);
3929         int size;
3930
3931         size = dsparb & 0x7f;
3932         size >>= 2; /* Convert to cachelines */
3933
3934         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3935                       plane ? "B" : "A",
3936                       size);
3937
3938         return size;
3939 }
3940
3941 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3942 {
3943         struct drm_i915_private *dev_priv = dev->dev_private;
3944         uint32_t dsparb = I915_READ(DSPARB);
3945         int size;
3946
3947         size = dsparb & 0x7f;
3948         size >>= 1; /* Convert to cachelines */
3949
3950         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3951                       plane ? "B" : "A", size);
3952
3953         return size;
3954 }
3955
3956 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3957 {
3958         struct drm_crtc *crtc, *enabled = NULL;
3959
3960         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3961                 if (crtc->enabled && crtc->fb) {
3962                         if (enabled)
3963                                 return NULL;
3964                         enabled = crtc;
3965                 }
3966         }
3967
3968         return enabled;
3969 }
3970
/* Program the Pineview self-refresh (CxSR) watermarks.
 *
 * CxSR is only usable when exactly one pipe is scanning out; otherwise
 * (or when the memory configuration is unknown) it is disabled.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): passes the display FIFO size rather than
		 * pineview_cursor_wm.fifo_size -- confirm intentional. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
4039
/* Compute the normal (non-self-refresh) plane and cursor watermarks for
 * @plane.
 *
 * Returns false (leaving only the guard-size minimums in the outputs)
 * when the pipe is inactive, so the caller knows it contributes nothing.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	/* entries = bytes scanned out during the latency window */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* extra fetch allowance, presumably to cover a TLB miss -- confirm */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	/* lines scanned out during the latency window, rounded up */
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	/* 64: presumably the maximum cursor width in pixels -- confirm */
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
4090
/*
 * Check the self-refresh watermark result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled (the function returns false and the caller keeps
 * self-refresh off).
 */
static bool g4x_check_srwm(struct drm_device *dev,
                           int display_wm, int cursor_wm,
                           const struct intel_watermark_params *display,
                           const struct intel_watermark_params *cursor)
{
        DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
                      display_wm, cursor_wm);

        /* A value beyond the register field's range cannot be programmed. */
        if (display_wm > display->max_wm) {
                DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
                              display_wm, display->max_wm);
                return false;
        }

        if (cursor_wm > cursor->max_wm) {
                DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
                              cursor_wm, cursor->max_wm);
                return false;
        }

        /* Both zero means the computed latency was zero: nothing to enable. */
        if (!(display_wm || cursor_wm)) {
                DRM_DEBUG_KMS("SR latency is 0, disabling\n");
                return false;
        }

        return true;
}
4125
4126 static bool g4x_compute_srwm(struct drm_device *dev,
4127                              int plane,
4128                              int latency_ns,
4129                              const struct intel_watermark_params *display,
4130                              const struct intel_watermark_params *cursor,
4131                              int *display_wm, int *cursor_wm)
4132 {
4133         struct drm_crtc *crtc;
4134         int hdisplay, htotal, pixel_size, clock;
4135         unsigned long line_time_us;
4136         int line_count, line_size;
4137         int small, large;
4138         int entries;
4139
4140         if (!latency_ns) {
4141                 *display_wm = *cursor_wm = 0;
4142                 return false;
4143         }
4144
4145         crtc = intel_get_crtc_for_plane(dev, plane);
4146         hdisplay = crtc->mode.hdisplay;
4147         htotal = crtc->mode.htotal;
4148         clock = crtc->mode.clock;
4149         pixel_size = crtc->fb->bits_per_pixel / 8;
4150
4151         line_time_us = (htotal * 1000) / clock;
4152         line_count = (latency_ns / line_time_us + 1000) / 1000;
4153         line_size = hdisplay * pixel_size;
4154
4155         /* Use the minimum of the small and large buffer method for primary */
4156         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4157         large = line_count * line_size;
4158
4159         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4160         *display_wm = entries + display->guard_size;
4161
4162         /* calculate the self-refresh watermark for display cursor */
4163         entries = line_count * pixel_size * 64;
4164         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4165         *cursor_wm = entries + cursor->guard_size;
4166
4167         return g4x_check_srwm(dev,
4168                               *display_wm, *cursor_wm,
4169                               display, cursor);
4170 }
4171
/* True when exactly one bit is set, i.e. exactly one plane is enabled. */
#define single_plane_enabled(mask) is_power_of_2(mask)
4173
/*
 * Program the G4x FIFO watermarks: WM0 for both plane/cursor pairs, plus
 * the self-refresh watermark (and the FW_BLC_SELF enable bit) when exactly
 * one plane is active.
 */
static void g4x_update_wm(struct drm_device *dev)
{
        /* SR latency; the WM0 paths use latency_ns (file-scope default). */
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;

        /* WM0 per pipe; 'enabled' collects a bit for each active pipe. */
        if (g4x_compute_wm0(dev, 0,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1;

        if (g4x_compute_wm0(dev, 1,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 2;

        /* Self-refresh is only usable with a single active plane. */
        plane_sr = cursor_sr = 0;
        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr))
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        else
                I915_WRITE(FW_BLC_SELF,
                           I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4224
/*
 * Program i965-class FIFO watermarks.  The normal plane/cursor watermarks
 * are fixed at 8; only the self-refresh watermarks are computed, and only
 * for the single-active-CRTC case.
 */
static void i965_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        int srwm = 1;
        int cursor_sr = 16;

        /* Calc sr entries for one plane configs */
        crtc = single_enabled_crtc(dev);
        if (crtc) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 12000;
                int clock = crtc->mode.clock;
                int htotal = crtc->mode.htotal;
                int hdisplay = crtc->mode.hdisplay;
                int pixel_size = crtc->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = ((htotal * 1000) / clock);

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
                /* The SR watermark is what remains of the FIFO. */
                srwm = I965_FIFO_SIZE - entries;
                if (srwm < 0)
                        srwm = 1;
                srwm &= 0x1ff;
                DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
                              entries, srwm);

                /* Same calculation for the 64px-wide cursor. */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * 64;
                entries = DIV_ROUND_UP(entries,
                                          i965_cursor_wm_info.cacheline_size);
                cursor_sr = i965_cursor_wm_info.fifo_size -
                        (entries + i965_cursor_wm_info.guard_size);

                if (cursor_sr > i965_cursor_wm_info.max_wm)
                        cursor_sr = i965_cursor_wm_info.max_wm;

                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
                              "cursor %d\n", srwm, cursor_sr);

                /* Only Crestline gets the self-refresh enable bit here. */
                if (IS_CRESTLINE(dev))
                        I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        } else {
                /* Turn off self refresh if both pipes are enabled */
                if (IS_CRESTLINE(dev))
                        I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
                                   & ~FW_BLC_SELF_EN);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
                      srwm);

        /* 965 has limitations... */
        I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
                   (8 << 16) | (8 << 8) | (8 << 0));
        I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
        /* update cursor SR watermark */
        I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4289
/*
 * Program FIFO watermarks for gen2/gen3-style hardware (i8xx/i9xx/i945).
 *
 * Each plane's WM0 is computed from the per-chip wm_info table; the
 * self-refresh watermark is only set up when exactly one CRTC is active
 * and the chip supports FW_BLC self-refresh.
 */
static void i9xx_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct intel_watermark_params *wm_info;
        uint32_t fwater_lo;
        uint32_t fwater_hi;
        int cwm, srwm = 1;
        int fifo_size;
        int planea_wm, planeb_wm;
        struct drm_crtc *crtc, *enabled = NULL;

        /* Pick the watermark parameters for this chip generation. */
        if (IS_I945GM(dev))
                wm_info = &i945_wm_info;
        else if (!IS_GEN2(dev))
                wm_info = &i915_wm_info;
        else
                wm_info = &i855_wm_info;

        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
        if (crtc->enabled && crtc->fb) {
                planea_wm = intel_calculate_wm(crtc->mode.clock,
                                               wm_info, fifo_size,
                                               crtc->fb->bits_per_pixel / 8,
                                               latency_ns);
                enabled = crtc;
        } else
                planea_wm = fifo_size - wm_info->guard_size;

        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
        if (crtc->enabled && crtc->fb) {
                planeb_wm = intel_calculate_wm(crtc->mode.clock,
                                               wm_info, fifo_size,
                                               crtc->fb->bits_per_pixel / 8,
                                               latency_ns);
                /* 'enabled' tracks a single active CRTC; NULL means 0 or 2. */
                if (enabled == NULL)
                        enabled = crtc;
                else
                        enabled = NULL;
        } else
                planeb_wm = fifo_size - wm_info->guard_size;

        DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

        /*
         * Overlay gets an aggressive default since video jitter is bad.
         */
        cwm = 2;

        /* Play safe and disable self-refresh before adjusting watermarks. */
        if (IS_I945G(dev) || IS_I945GM(dev))
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
        else if (IS_I915GM(dev))
                I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

        /* Calc sr entries for one plane configs */
        if (HAS_FW_BLC(dev) && enabled) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 6000;
                int clock = enabled->mode.clock;
                int htotal = enabled->mode.htotal;
                int hdisplay = enabled->mode.hdisplay;
                int pixel_size = enabled->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;

                line_time_us = (htotal * 1000) / clock;

                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
                        pixel_size * hdisplay;
                entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
                DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
                srwm = wm_info->fifo_size - entries;
                if (srwm < 0)
                        srwm = 1;

                if (IS_I945G(dev) || IS_I945GM(dev))
                        I915_WRITE(FW_BLC_SELF,
                                   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
                else if (IS_I915GM(dev))
                        I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
                      planea_wm, planeb_wm, cwm, srwm);

        /*
         * NOTE(review): the plane watermarks are truncated to 6 bits here.
         * If intel_calculate_wm() can return values > 0x3f for wide,
         * high-clock modes (e.g. a 2048-wide VGA mode on Pineview), the
         * value wraps to a much smaller watermark and can cause FIFO
         * underruns -- verify that wm_info->max_wm clamps these to the
         * register field width on all chips using this path.
         */
        fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
        fwater_hi = (cwm & 0x1f);

        /* Set request length to 8 cachelines per fetch */
        fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
        fwater_hi = fwater_hi | (1 << 8);

        I915_WRITE(FW_BLC, fwater_lo);
        I915_WRITE(FW_BLC2, fwater_hi);

        /* Re-enable self-refresh now that the new watermarks are in place. */
        if (HAS_FW_BLC(dev)) {
                if (enabled) {
                        if (IS_I945G(dev) || IS_I945GM(dev))
                                I915_WRITE(FW_BLC_SELF,
                                           FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
                        else if (IS_I915GM(dev))
                                I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
                        DRM_DEBUG_KMS("memory self refresh enabled\n");
                } else
                        DRM_DEBUG_KMS("memory self refresh disabled\n");
        }
}
4400
/*
 * Program the i830 FIFO watermark (plane A only).  Nothing is written
 * unless exactly one CRTC is active.
 */
static void i830_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        uint32_t fwater_lo;
        int planea_wm;

        crtc = single_enabled_crtc(dev);
        if (crtc == NULL)
                return;

        planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
                                       dev_priv->display.get_fifo_size(dev, 0),
                                       crtc->fb->bits_per_pixel / 8,
                                       latency_ns);
        /*
         * Preserve the bits outside the low 12; (3<<8) presumably sets the
         * fetch/burst length field -- confirm against the FW_BLC layout.
         */
        fwater_lo = I915_READ(FW_BLC) & ~0xfff;
        fwater_lo |= (3<<8) | planea_wm;

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

        I915_WRITE(FW_BLC, fwater_lo);
}
4423
/* Ironlake WM0 (LP0) latencies, in ns (fed to g4x_compute_wm0()). */
#define ILK_LP0_PLANE_LATENCY           700
#define ILK_LP0_CURSOR_LATENCY          1300
4426
4427 /*
4428  * Check the wm result.
4429  *
4430  * If any calculated watermark values is larger than the maximum value that
4431  * can be programmed into the associated watermark register, that watermark
4432  * must be disabled.
4433  */
4434 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4435                                 int fbc_wm, int display_wm, int cursor_wm,
4436                                 const struct intel_watermark_params *display,
4437                                 const struct intel_watermark_params *cursor)
4438 {
4439         struct drm_i915_private *dev_priv = dev->dev_private;
4440
4441         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4442                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4443
4444         if (fbc_wm > SNB_FBC_MAX_SRWM) {
4445                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4446                               fbc_wm, SNB_FBC_MAX_SRWM, level);
4447
4448                 /* fbc has it's own way to disable FBC WM */
4449                 I915_WRITE(DISP_ARB_CTL,
4450                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4451                 return false;
4452         }
4453
4454         if (display_wm > display->max_wm) {
4455                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
4456                               display_wm, SNB_DISPLAY_MAX_SRWM, level);
4457                 return false;
4458         }
4459
4460         if (cursor_wm > cursor->max_wm) {
4461                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
4462                               cursor_wm, SNB_CURSOR_MAX_SRWM, level);
4463                 return false;
4464         }
4465
4466         if (!(fbc_wm || display_wm || cursor_wm)) {
4467                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4468                 return false;
4469         }
4470
4471         return true;
4472 }
4473
4474 /*
4475  * Compute watermark values of WM[1-3],
4476  */
4477 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4478                                   int latency_ns,
4479                                   const struct intel_watermark_params *display,
4480                                   const struct intel_watermark_params *cursor,
4481                                   int *fbc_wm, int *display_wm, int *cursor_wm)
4482 {
4483         struct drm_crtc *crtc;
4484         unsigned long line_time_us;
4485         int hdisplay, htotal, pixel_size, clock;
4486         int line_count, line_size;
4487         int small, large;
4488         int entries;
4489
4490         if (!latency_ns) {
4491                 *fbc_wm = *display_wm = *cursor_wm = 0;
4492                 return false;
4493         }
4494
4495         crtc = intel_get_crtc_for_plane(dev, plane);
4496         hdisplay = crtc->mode.hdisplay;
4497         htotal = crtc->mode.htotal;
4498         clock = crtc->mode.clock;
4499         pixel_size = crtc->fb->bits_per_pixel / 8;
4500
4501         line_time_us = (htotal * 1000) / clock;
4502         line_count = (latency_ns / line_time_us + 1000) / 1000;
4503         line_size = hdisplay * pixel_size;
4504
4505         /* Use the minimum of the small and large buffer method for primary */
4506         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4507         large = line_count * line_size;
4508
4509         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4510         *display_wm = entries + display->guard_size;
4511
4512         /*
4513          * Spec says:
4514          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4515          */
4516         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4517
4518         /* calculate the self-refresh watermark for display cursor */
4519         entries = line_count * pixel_size * 64;
4520         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4521         *cursor_wm = entries + cursor->guard_size;
4522
4523         return ironlake_check_srwm(dev, level,
4524                                    *fbc_wm, *display_wm, *cursor_wm,
4525                                    display, cursor);
4526 }
4527
/*
 * Program Ironlake FIFO watermarks: WM0 for pipes A and B, then the
 * WM1/WM2 self-refresh levels (single active plane only).  WM3 is not
 * programmed on ILK.
 */
static void ironlake_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;

        enabled = 0;
        if (g4x_compute_wm0(dev, 0,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
                            ILK_LP0_CURSOR_LATENCY,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEA_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
                              " plane %d, " "cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 1;
        }

        if (g4x_compute_wm0(dev, 1,
                            &ironlake_display_wm_info,
                            ILK_LP0_PLANE_LATENCY,
                            &ironlake_cursor_wm_info,
                            ILK_LP0_CURSOR_LATENCY,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEB_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
                DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
                              " plane %d, cursor: %d\n",
                              plane_wm, cursor_wm);
                enabled |= 2;
        }

        /*
         * Calculate and update the self-refresh watermark only when one
         * display plane is used.  The SR levels are cleared first so they
         * never stay enabled with stale values.
         */
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);

        if (!single_plane_enabled(enabled))
                return;
        enabled = ffs(enabled) - 1;     /* now the index of the active pipe */

        /* WM1 */
        if (!ironlake_compute_srwm(dev, 1, enabled,
                                   ILK_READ_WM1_LATENCY() * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM1_LP_ILK,
                   WM1_LP_SR_EN |
                   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /* WM2 */
        if (!ironlake_compute_srwm(dev, 2, enabled,
                                   ILK_READ_WM2_LATENCY() * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
                return;

        I915_WRITE(WM2_LP_ILK,
                   WM2_LP_EN |
                   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);

        /*
         * WM3 is unsupported on ILK, probably because we don't have latency
         * data for that power state
         */
}
4610
4611 void sandybridge_update_wm(struct drm_device *dev)
4612 {
4613         struct drm_i915_private *dev_priv = dev->dev_private;
4614         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
4615         u32 val;
4616         int fbc_wm, plane_wm, cursor_wm;
4617         unsigned int enabled;
4618
4619         enabled = 0;
4620         if (g4x_compute_wm0(dev, 0,
4621                             &sandybridge_display_wm_info, latency,
4622                             &sandybridge_cursor_wm_info, latency,
4623                             &plane_wm, &cursor_wm)) {
4624                 val = I915_READ(WM0_PIPEA_ILK);
4625                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4626                 I915_WRITE(WM0_PIPEA_ILK, val |
4627                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4628                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4629                               " plane %d, " "cursor: %d\n",
4630                               plane_wm, cursor_wm);
4631                 enabled |= 1;
4632         }
4633
4634         if (g4x_compute_wm0(dev, 1,
4635                             &sandybridge_display_wm_info, latency,
4636                             &sandybridge_cursor_wm_info, latency,
4637                             &plane_wm, &cursor_wm)) {
4638                 val = I915_READ(WM0_PIPEB_ILK);
4639                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4640                 I915_WRITE(WM0_PIPEB_ILK, val |
4641                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4642                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4643                               " plane %d, cursor: %d\n",
4644                               plane_wm, cursor_wm);
4645                 enabled |= 2;
4646         }
4647
4648         /* IVB has 3 pipes */
4649         if (IS_IVYBRIDGE(dev) &&
4650             g4x_compute_wm0(dev, 2,
4651                             &sandybridge_display_wm_info, latency,
4652                             &sandybridge_cursor_wm_info, latency,
4653                             &plane_wm, &cursor_wm)) {
4654                 val = I915_READ(WM0_PIPEC_IVB);
4655                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4656                 I915_WRITE(WM0_PIPEC_IVB, val |
4657                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4658                 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4659                               " plane %d, cursor: %d\n",
4660                               plane_wm, cursor_wm);
4661                 enabled |= 3;
4662         }
4663
4664         /*
4665          * Calculate and update the self-refresh watermark only when one
4666          * display plane is used.
4667          *
4668          * SNB support 3 levels of watermark.
4669          *
4670          * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
4671          * and disabled in the descending order
4672          *
4673          */
4674         I915_WRITE(WM3_LP_ILK, 0);
4675         I915_WRITE(WM2_LP_ILK, 0);
4676         I915_WRITE(WM1_LP_ILK, 0);
4677
4678         if (!single_plane_enabled(enabled) ||
4679             dev_priv->sprite_scaling_enabled)
4680                 return;
4681         enabled = ffs(enabled) - 1;
4682
4683         /* WM1 */
4684         if (!ironlake_compute_srwm(dev, 1, enabled,
4685                                    SNB_READ_WM1_LATENCY() * 500,
4686                                    &sandybridge_display_srwm_info,
4687                                    &sandybridge_cursor_srwm_info,
4688                                    &fbc_wm, &plane_wm, &cursor_wm))
4689                 return;
4690
4691         I915_WRITE(WM1_LP_ILK,
4692                    WM1_LP_SR_EN |
4693                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4694                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4695                    (plane_wm << WM1_LP_SR_SHIFT) |
4696                    cursor_wm);
4697
4698         /* WM2 */
4699         if (!ironlake_compute_srwm(dev, 2, enabled,
4700                                    SNB_READ_WM2_LATENCY() * 500,
4701                                    &sandybridge_display_srwm_info,
4702                                    &sandybridge_cursor_srwm_info,
4703                                    &fbc_wm, &plane_wm, &cursor_wm))
4704                 return;
4705
4706         I915_WRITE(WM2_LP_ILK,
4707                    WM2_LP_EN |
4708                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4709                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4710                    (plane_wm << WM1_LP_SR_SHIFT) |
4711                    cursor_wm);
4712
4713         /* WM3 */
4714         if (!ironlake_compute_srwm(dev, 3, enabled,
4715                                    SNB_READ_WM3_LATENCY() * 500,
4716                                    &sandybridge_display_srwm_info,
4717                                    &sandybridge_cursor_srwm_info,
4718                                    &fbc_wm, &plane_wm, &cursor_wm))
4719                 return;
4720
4721         I915_WRITE(WM3_LP_ILK,
4722                    WM3_LP_EN |
4723                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4724                    (fbc_wm << WM1_LP_FBC_SHIFT) |
4725                    (plane_wm << WM1_LP_SR_SHIFT) |
4726                    cursor_wm);
4727 }
4728
4729 static bool
4730 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4731                               uint32_t sprite_width, int pixel_size,
4732                               const struct intel_watermark_params *display,
4733                               int display_latency_ns, int *sprite_wm)
4734 {
4735         struct drm_crtc *crtc;
4736         int clock;
4737         int entries, tlb_miss;
4738
4739         crtc = intel_get_crtc_for_plane(dev, plane);
4740         if (crtc->fb == NULL || !crtc->enabled) {
4741                 *sprite_wm = display->guard_size;
4742                 return false;
4743         }
4744
4745         clock = crtc->mode.clock;
4746
4747         /* Use the small buffer method to calculate the sprite watermark */
4748         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4749         tlb_miss = display->fifo_size*display->cacheline_size -
4750                 sprite_width * 8;
4751         if (tlb_miss > 0)
4752                 entries += tlb_miss;
4753         entries = DIV_ROUND_UP(entries, display->cacheline_size);
4754         *sprite_wm = entries + display->guard_size;
4755         if (*sprite_wm > (int)display->max_wm)
4756                 *sprite_wm = display->max_wm;
4757
4758         return true;
4759 }
4760
4761 static bool
4762 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4763                                 uint32_t sprite_width, int pixel_size,
4764                                 const struct intel_watermark_params *display,
4765                                 int latency_ns, int *sprite_wm)
4766 {
4767         struct drm_crtc *crtc;
4768         unsigned long line_time_us;
4769         int clock;
4770         int line_count, line_size;
4771         int small, large;
4772         int entries;
4773
4774         if (!latency_ns) {
4775                 *sprite_wm = 0;
4776                 return false;
4777         }
4778
4779         crtc = intel_get_crtc_for_plane(dev, plane);
4780         clock = crtc->mode.clock;
4781         if (!clock) {
4782                 *sprite_wm = 0;
4783                 return false;
4784         }
4785
4786         line_time_us = (sprite_width * 1000) / clock;
4787         if (!line_time_us) {
4788                 *sprite_wm = 0;
4789                 return false;
4790         }
4791
4792         line_count = (latency_ns / line_time_us + 1000) / 1000;
4793         line_size = sprite_width * pixel_size;
4794
4795         /* Use the minimum of the small and large buffer method for primary */
4796         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4797         large = line_count * line_size;
4798
4799         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4800         *sprite_wm = entries + display->guard_size;
4801
4802         return *sprite_wm > 0x3ff ? false : true;
4803 }
4804
/*
 * sandybridge_update_sprite_wm - program sprite plane watermarks for a pipe
 * @dev: drm device
 * @pipe: pipe whose sprite plane is being reconfigured (0-2)
 * @sprite_width: horizontal size of the sprite, in pixels
 * @pixel_size: bytes per pixel of the sprite framebuffer format
 *
 * Writes the sprite field of the pipe's WM0 register, then the LP1 sprite
 * self-refresh watermark (WM1S_LP_ILK).  Ivybridge additionally gets the
 * LP2/LP3 sprite watermarks (WM2S/WM3S).  Any failed computation aborts the
 * update, leaving the remaining registers at their previous values.
 */
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
                                         uint32_t sprite_width, int pixel_size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
        u32 val;
        int sprite_wm, reg;
        int ret;

        /* Pick the WM0 register for this pipe; pipe C only exists on IVB. */
        switch (pipe) {
        case 0:
                reg = WM0_PIPEA_ILK;
                break;
        case 1:
                reg = WM0_PIPEB_ILK;
                break;
        case 2:
                reg = WM0_PIPEC_IVB;
                break;
        default:
                return; /* bad pipe */
        }

        ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
                                            &sandybridge_display_wm_info,
                                            latency, &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
                              pipe);
                return;
        }

        /* Read-modify-write so only the sprite field of WM0 changes. */
        val = I915_READ(reg);
        val &= ~WM0_PIPE_SPRITE_MASK;
        I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
        DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);


        /* LP1 sprite self-refresh watermark; the *500 presumably converts a
         * 0.5us-unit latency to ns (cf. the 0.1us WM0 comment above). */
        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
                                              SNB_READ_WM1_LATENCY() * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
                              pipe);
                return;
        }
        I915_WRITE(WM1S_LP_ILK, sprite_wm);

        /* Only IVB has two more LP watermarks for sprite */
        if (!IS_IVYBRIDGE(dev))
                return;

        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
                                              SNB_READ_WM2_LATENCY() * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
                              pipe);
                return;
        }
        I915_WRITE(WM2S_LP_IVB, sprite_wm);

        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
                                              SNB_READ_WM3_LATENCY() * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
                              pipe);
                return;
        }
        I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
4883
4884 /**
4885  * intel_update_watermarks - update FIFO watermark values based on current modes
4886  *
4887  * Calculate watermark values for the various WM regs based on current mode
4888  * and plane configuration.
4889  *
4890  * There are several cases to deal with here:
4891  *   - normal (i.e. non-self-refresh)
4892  *   - self-refresh (SR) mode
4893  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4894  *   - lines are small relative to FIFO size (buffer can hold more than 2
4895  *     lines), so need to account for TLB latency
4896  *
4897  *   The normal calculation is:
4898  *     watermark = dotclock * bytes per pixel * latency
4899  *   where latency is platform & configuration dependent (we assume pessimal
4900  *   values here).
4901  *
4902  *   The SR calculation is:
4903  *     watermark = (trunc(latency/line time)+1) * surface width *
4904  *       bytes per pixel
4905  *   where
4906  *     line time = htotal / dotclock
4907  *     surface width = hdisplay for normal plane and 64 for cursor
4908  *   and latency is assumed to be high, as above.
4909  *
4910  * The final value programmed to the register should always be rounded up,
4911  * and include an extra 2 entries to account for clock crossings.
4912  *
4913  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4914  * to set the non-SR watermarks to 8.
4915  */
4916 static void intel_update_watermarks(struct drm_device *dev)
4917 {
4918         struct drm_i915_private *dev_priv = dev->dev_private;
4919
4920         if (dev_priv->display.update_wm)
4921                 dev_priv->display.update_wm(dev);
4922 }
4923
4924 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4925                                     uint32_t sprite_width, int pixel_size)
4926 {
4927         struct drm_i915_private *dev_priv = dev->dev_private;
4928
4929         if (dev_priv->display.update_sprite_wm)
4930                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4931                                                    pixel_size);
4932 }
4933
4934 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4935 {
4936         if (i915_panel_use_ssc >= 0)
4937                 return i915_panel_use_ssc != 0;
4938         return dev_priv->lvds_use_ssc
4939                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4940 }
4941
4942 /**
4943  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4944  * @crtc: CRTC structure
4945  * @mode: requested mode
4946  *
4947  * A pipe may be connected to one or more outputs.  Based on the depth of the
4948  * attached framebuffer, choose a good color depth to use on the pipe.
4949  *
4950  * If possible, match the pipe depth to the fb depth.  In some cases, this
4951  * isn't ideal, because the connected output supports a lesser or restricted
4952  * set of depths.  Resolve that here:
4953  *    LVDS typically supports only 6bpc, so clamp down in that case
4954  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4955  *    Displays may support a restricted set as well, check EDID and clamp as
4956  *      appropriate.
4957  *    DP may want to dither down to 6bpc to fit larger modes
4958  *
4959  * RETURNS:
4960  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4961  * true if they don't match).
4962  */
4963 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4964                                          unsigned int *pipe_bpp,
4965                                          struct drm_display_mode *mode)
4966 {
4967         struct drm_device *dev = crtc->dev;
4968         struct drm_i915_private *dev_priv = dev->dev_private;
4969         struct drm_encoder *encoder;
4970         struct drm_connector *connector;
4971         unsigned int display_bpc = UINT_MAX, bpc;
4972
4973         /* Walk the encoders & connectors on this crtc, get min bpc */
4974         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4975                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4976
4977                 if (encoder->crtc != crtc)
4978                         continue;
4979
4980                 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4981                         unsigned int lvds_bpc;
4982
4983                         if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4984                             LVDS_A3_POWER_UP)
4985                                 lvds_bpc = 8;
4986                         else
4987                                 lvds_bpc = 6;
4988
4989                         if (lvds_bpc < display_bpc) {
4990                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4991                                 display_bpc = lvds_bpc;
4992                         }
4993                         continue;
4994                 }
4995
4996                 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4997                         /* Use VBT settings if we have an eDP panel */
4998                         unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4999
5000                         if (edp_bpc < display_bpc) {
5001                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
5002                                 display_bpc = edp_bpc;
5003                         }
5004                         continue;
5005                 }
5006
5007                 /* Not one of the known troublemakers, check the EDID */
5008                 list_for_each_entry(connector, &dev->mode_config.connector_list,
5009                                     head) {
5010                         if (connector->encoder != encoder)
5011                                 continue;
5012
5013                         /* Don't use an invalid EDID bpc value */
5014                         if (connector->display_info.bpc &&
5015                             connector->display_info.bpc < display_bpc) {
5016                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
5017                                 display_bpc = connector->display_info.bpc;
5018                         }
5019                 }
5020
5021                 /*
5022                  * HDMI is either 12 or 8, so if the display lets 10bpc sneak
5023                  * through, clamp it down.  (Note: >12bpc will be caught below.)
5024                  */
5025                 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
5026                         if (display_bpc > 8 && display_bpc < 12) {
5027                                 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5028                                 display_bpc = 12;
5029                         } else {
5030                                 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5031                                 display_bpc = 8;
5032                         }
5033                 }
5034         }
5035
5036         if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5037                 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
5038                 display_bpc = 6;
5039         }
5040
5041         /*
5042          * We could just drive the pipe at the highest bpc all the time and
5043          * enable dithering as needed, but that costs bandwidth.  So choose
5044          * the minimum value that expresses the full color range of the fb but
5045          * also stays within the max display bpc discovered above.
5046          */
5047
5048         switch (crtc->fb->depth) {
5049         case 8:
5050                 bpc = 8; /* since we go through a colormap */
5051                 break;
5052         case 15:
5053         case 16:
5054                 bpc = 6; /* min is 18bpp */
5055                 break;
5056         case 24:
5057                 bpc = 8;
5058                 break;
5059         case 30:
5060                 bpc = 10;
5061                 break;
5062         case 48:
5063                 bpc = 12;
5064                 break;
5065         default:
5066                 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
5067                 bpc = min((unsigned int)8, display_bpc);
5068                 break;
5069         }
5070
5071         display_bpc = min(display_bpc, bpc);
5072
5073         DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
5074                       bpc, display_bpc);
5075
5076         *pipe_bpp = display_bpc * 3;
5077
5078         return display_bpc != bpc;
5079 }
5080
5081 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5082 {
5083         struct drm_device *dev = crtc->dev;
5084         struct drm_i915_private *dev_priv = dev->dev_private;
5085         int refclk;
5086
5087         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5088             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5089                 refclk = dev_priv->lvds_ssc_freq * 1000;
5090                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5091                               refclk / 1000);
5092         } else if (!IS_GEN2(dev)) {
5093                 refclk = 96000;
5094         } else {
5095                 refclk = 48000;
5096         }
5097
5098         return refclk;
5099 }
5100
5101 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5102                                       intel_clock_t *clock)
5103 {
5104         /* SDVO TV has fixed PLL values depend on its clock range,
5105            this mirrors vbios setting. */
5106         if (adjusted_mode->clock >= 100000
5107             && adjusted_mode->clock < 140500) {
5108                 clock->p1 = 2;
5109                 clock->p2 = 10;
5110                 clock->n = 3;
5111                 clock->m1 = 16;
5112                 clock->m2 = 8;
5113         } else if (adjusted_mode->clock >= 140500
5114                    && adjusted_mode->clock <= 200000) {
5115                 clock->p1 = 1;
5116                 clock->p2 = 10;
5117                 clock->n = 6;
5118                 clock->m1 = 12;
5119                 clock->m2 = 8;
5120         }
5121 }
5122
5123 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5124                                      intel_clock_t *clock,
5125                                      intel_clock_t *reduced_clock)
5126 {
5127         struct drm_device *dev = crtc->dev;
5128         struct drm_i915_private *dev_priv = dev->dev_private;
5129         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5130         int pipe = intel_crtc->pipe;
5131         u32 fp, fp2 = 0;
5132
5133         if (IS_PINEVIEW(dev)) {
5134                 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5135                 if (reduced_clock)
5136                         fp2 = (1 << reduced_clock->n) << 16 |
5137                                 reduced_clock->m1 << 8 | reduced_clock->m2;
5138         } else {
5139                 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5140                 if (reduced_clock)
5141                         fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5142                                 reduced_clock->m2;
5143         }
5144
5145         I915_WRITE(FP0(pipe), fp);
5146
5147         intel_crtc->lowfreq_avail = false;
5148         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5149             reduced_clock && i915_powersave) {
5150                 I915_WRITE(FP1(pipe), fp2);
5151                 intel_crtc->lowfreq_avail = true;
5152         } else {
5153                 I915_WRITE(FP1(pipe), fp);
5154         }
5155 }
5156
/*
 * i9xx_crtc_mode_set - program pipe timings, plane and DPLL for the
 * requested mode on the non-PCH (i9xx) modeset path.
 * @crtc: CRTC being configured
 * @mode: mode as requested (used for the plane/pipe source size)
 * @adjusted_mode: mode after encoder fixup (used for timings and clock)
 * @x: framebuffer scanout x offset
 * @y: framebuffer scanout y offset
 * @old_fb: framebuffer previously bound to this crtc, handed to set_base
 *
 * Returns 0 on success, -EINVAL if no PLL divisors can be found for the
 * target clock, or the error from intel_pipe_set_base().
 *
 * The register write ordering below (FP dividers, DPLL off, LVDS pins,
 * DPLL on, delays, timing registers, pipe, plane) is deliberate — do not
 * reorder.
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode,
                              int x, int y,
                              struct drm_framebuffer *old_fb)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
        u32 dpll, dspcntr, pipeconf, vsyncshift;
        bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
        const intel_limit_t *limit;
        int ret;
        u32 temp;
        u32 lvds_sync = 0;

        /* Inventory the output types attached to this crtc; they decide how
         * the DPLL is configured below. */
        list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
                if (encoder->base.crtc != crtc)
                        continue;

                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
                case INTEL_OUTPUT_SDVO:
                case INTEL_OUTPUT_HDMI:
                        is_sdvo = true;
                        if (encoder->needs_tv_clock)
                                is_tv = true;
                        break;
                case INTEL_OUTPUT_DVO:
                        is_dvo = true;
                        break;
                case INTEL_OUTPUT_TVOUT:
                        is_tv = true;
                        break;
                case INTEL_OUTPUT_ANALOG:
                        is_crt = true;
                        break;
                case INTEL_OUTPUT_DISPLAYPORT:
                        is_dp = true;
                        break;
                }

                num_connectors++;
        }

        refclk = i9xx_get_refclk(crtc, num_connectors);

        /*
         * Returns a set of divisors for the desired target clock with the given
         * refclk, or FALSE.  The returned values represent the clock equation:
         * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
         */
        limit = intel_limit(crtc, refclk);
        ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
                             &clock);
        if (!ok) {
                DRM_ERROR("Couldn't find PLL settings for mode!\n");
                return -EINVAL;
        }

        /* Ensure that the cursor is valid for the new mode before changing... */
        intel_crtc_update_cursor(crtc, true);

        if (is_lvds && dev_priv->lvds_downclock_avail) {
                /*
                 * Ensure we match the reduced clock's P to the target clock.
                 * If the clocks don't match, we can't switch the display clock
                 * by using the FP0/FP1. In such case we will disable the LVDS
                 * downclock feature.
                */
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                    dev_priv->lvds_downclock,
                                                    refclk,
                                                    &clock,
                                                    &reduced_clock);
        }

        if (is_sdvo && is_tv)
                i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

        /* Program FP0/FP1; also records lowfreq_avail for the CxSR check. */
        i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
                                 &reduced_clock : NULL);

        /* Build the DPLL control value: output mode, P dividers, reference. */
        dpll = DPLL_VGA_MODE_DIS;

        if (!IS_GEN2(dev)) {
                if (is_lvds)
                        dpll |= DPLLB_MODE_LVDS;
                else
                        dpll |= DPLLB_MODE_DAC_SERIAL;
                if (is_sdvo) {
                        int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
                        if (pixel_multiplier > 1) {
                                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                                        dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
                        }
                        dpll |= DPLL_DVO_HIGH_SPEED;
                }
                if (is_dp)
                        dpll |= DPLL_DVO_HIGH_SPEED;

                /* compute bitmask from p1 value */
                if (IS_PINEVIEW(dev))
                        dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
                else {
                        dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                        if (IS_G4X(dev) && has_reduced_clock)
                                dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
                }
                switch (clock.p2) {
                case 5:
                        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                        break;
                case 7:
                        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                        break;
                case 10:
                        dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                        break;
                case 14:
                        dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                        break;
                }
                if (INTEL_INFO(dev)->gen >= 4)
                        dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
        } else {
                /* gen2 encodes P1 differently and has no P2 field per se. */
                if (is_lvds) {
                        dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                } else {
                        if (clock.p1 == 2)
                                dpll |= PLL_P1_DIVIDE_BY_TWO;
                        else
                                dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                        if (clock.p2 == 4)
                                dpll |= PLL_P2_DIVIDE_BY_4;
                }
        }

        if (is_sdvo && is_tv)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
        else if (is_tv)
                /* XXX: just matching BIOS for now */
                /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
                dpll |= 3;
        else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        /* setup pipeconf */
        pipeconf = I915_READ(PIPECONF(pipe));

        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;

        if (pipe == 0)
                dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
        else
                dspcntr |= DISPPLANE_SEL_PIPE_B;

        if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
                /* Enable pixel doubling when the dot clock is > 85% of the (display)
                 * core speed.
                 *
                 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
                 * pipe == 0 check?
                 */
                if (mode->clock >
                    dev_priv->display.get_display_clock_speed(dev) * 17 / 20)
                        pipeconf |= PIPECONF_DOUBLE_WIDE;
                else
                        pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }

        /* default to 8bpc */
        pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
        if (is_dp) {
                if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
                        pipeconf |= PIPECONF_BPP_6 |
                                    PIPECONF_DITHER_EN |
                                    PIPECONF_DITHER_TYPE_SP;
                }
        }

        dpll |= DPLL_VCO_ENABLE;

        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);

        /* Write the DPLL configuration with the VCO still disabled; it is
         * enabled further down once the LVDS pins are set up. */
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
         */
        if (is_lvds) {
                temp = I915_READ(LVDS);
                temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
                if (pipe == 1) {
                        temp |= LVDS_PIPEB_SELECT;
                } else {
                        temp &= ~LVDS_PIPEB_SELECT;
                }
                /* set the corresponding LVDS_BORDER bit */
                temp |= dev_priv->lvds_border_bits;
                /* Set the B0-B3 data pairs corresponding to whether we're going to
                 * set the DPLLs for dual-channel mode or not.
                 */
                if (clock.p2 == 7)
                        temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
                else
                        temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

                /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
                 * appropriately here, but we need to look more thoroughly into how
                 * panels behave in the two modes.
                 */
                /* set the dithering flag on LVDS as needed */
                if (INTEL_INFO(dev)->gen >= 4) {
                        if (dev_priv->lvds_dither)
                                temp |= LVDS_ENABLE_DITHER;
                        else
                                temp &= ~LVDS_ENABLE_DITHER;
                }
                if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
                        lvds_sync |= LVDS_HSYNC_POLARITY;
                if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
                        lvds_sync |= LVDS_VSYNC_POLARITY;
                if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
                    != lvds_sync) {
                        char flags[2] = "-+";
                        DRM_INFO("Changing LVDS panel from "
                                 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
                                 flags[!(temp & LVDS_HSYNC_POLARITY)],
                                 flags[!(temp & LVDS_VSYNC_POLARITY)],
                                 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
                                 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
                        temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
                        temp |= lvds_sync;
                }
                I915_WRITE(LVDS, temp);
        }

        if (is_dp) {
                intel_dp_set_m_n(crtc, mode, adjusted_mode);
        }

        /* Now enable the VCO. */
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* gen4+ has a separate register for the SDVO multiplier. */
                temp = 0;
                if (is_sdvo) {
                        temp = intel_mode_get_pixel_multiplier(adjusted_mode);
                        if (temp > 1)
                                temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                        else
                                temp = 0;
                }
                I915_WRITE(DPLL_MD(pipe), temp);
        } else {
                /* The pixel multiplier can only be updated once the
                 * DPLL is enabled and the clocks are stable.
                 *
                 * So write it again.
                 */
                I915_WRITE(DPLL(pipe), dpll);
        }

        if (HAS_PIPE_CXSR(dev)) {
                if (intel_crtc->lowfreq_avail) {
                        DRM_DEBUG_KMS("enabling CxSR downclocking\n");
                        pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
                } else {
                        DRM_DEBUG_KMS("disabling CxSR downclocking\n");
                        pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
                }
        }

        pipeconf &= ~PIPECONF_INTERLACE_MASK;
        if (!IS_GEN2(dev) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                /* the chip adds 2 halflines automatically */
                adjusted_mode->crtc_vtotal -= 1;
                adjusted_mode->crtc_vblank_end -= 1;
                vsyncshift = adjusted_mode->crtc_hsync_start
                             - adjusted_mode->crtc_htotal/2;
        } else {
                pipeconf |= PIPECONF_PROGRESSIVE;
                vsyncshift = 0;
        }

        if (!IS_GEN3(dev))
                I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

        /* Timing registers pack (start - 1) in the low and (end - 1) in the
         * high 16 bits, as the hardware counts from zero. */
        I915_WRITE(HTOTAL(pipe),
                   (adjusted_mode->crtc_hdisplay - 1) |
                   ((adjusted_mode->crtc_htotal - 1) << 16));
        I915_WRITE(HBLANK(pipe),
                   (adjusted_mode->crtc_hblank_start - 1) |
                   ((adjusted_mode->crtc_hblank_end - 1) << 16));
        I915_WRITE(HSYNC(pipe),
                   (adjusted_mode->crtc_hsync_start - 1) |
                   ((adjusted_mode->crtc_hsync_end - 1) << 16));

        I915_WRITE(VTOTAL(pipe),
                   (adjusted_mode->crtc_vdisplay - 1) |
                   ((adjusted_mode->crtc_vtotal - 1) << 16));
        I915_WRITE(VBLANK(pipe),
                   (adjusted_mode->crtc_vblank_start - 1) |
                   ((adjusted_mode->crtc_vblank_end - 1) << 16));
        I915_WRITE(VSYNC(pipe),
                   (adjusted_mode->crtc_vsync_start - 1) |
                   ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* pipesrc and dspsize control the size that is scaled from,
         * which should always be the user's requested size.
         */
        I915_WRITE(DSPSIZE(plane),
                   ((mode->vdisplay - 1) << 16) |
                   (mode->hdisplay - 1));
        I915_WRITE(DSPPOS(plane), 0);
        I915_WRITE(PIPESRC(pipe),
                   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

        /* Enable the pipe first, then the plane after a vblank has passed. */
        I915_WRITE(PIPECONF(pipe), pipeconf);
        POSTING_READ(PIPECONF(pipe));
        intel_enable_pipe(dev_priv, pipe, false);

        intel_wait_for_vblank(dev, pipe);

        I915_WRITE(DSPCNTR(plane), dspcntr);
        POSTING_READ(DSPCNTR(plane));
        intel_enable_plane(dev_priv, plane, pipe);

        ret = intel_pipe_set_base(crtc, x, y, old_fb);

        intel_update_watermarks(dev);

        return ret;
}
5514
/*
 * Initialize the PCH display reference clock (PCH_DREF_CONTROL) when the
 * driver loads.
 *
 * Scans every registered encoder to determine which panel outputs exist
 * (LVDS, CPU-attached eDP, PCH-attached eDP) and programs the nonspread
 * and SSC source selection bits accordingly.  Each PCH_DREF_CONTROL write
 * is posted and followed by a 200us delay so the clock settles before the
 * next programming step.
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* eDP may hang off either the CPU or the PCH */
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	/*
	 * On IbexPeak, SSC is only usable when the external CK505 clock
	 * chip is present (per VBT display_clock_mode); later PCHs can
	 * always generate SSC internally.
	 */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		} else
			temp &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		/* No panel outputs at all: shut the SSC machinery down,
		 * stepping through CPU output disable first, then the
		 * SSC source, then SSC1 itself. */
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
5631
5632 static int ironlake_get_refclk(struct drm_crtc *crtc)
5633 {
5634         struct drm_device *dev = crtc->dev;
5635         struct drm_i915_private *dev_priv = dev->dev_private;
5636         struct intel_encoder *encoder;
5637         struct drm_mode_config *mode_config = &dev->mode_config;
5638         struct intel_encoder *edp_encoder = NULL;
5639         int num_connectors = 0;
5640         bool is_lvds = false;
5641
5642         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5643                 if (encoder->base.crtc != crtc)
5644                         continue;
5645
5646                 switch (encoder->type) {
5647                 case INTEL_OUTPUT_LVDS:
5648                         is_lvds = true;
5649                         break;
5650                 case INTEL_OUTPUT_EDP:
5651                         edp_encoder = encoder;
5652                         break;
5653                 }
5654                 num_connectors++;
5655         }
5656
5657         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5658                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5659                               dev_priv->lvds_ssc_freq);
5660                 return dev_priv->lvds_ssc_freq * 1000;
5661         }
5662
5663         return 120000;
5664 }
5665
/*
 * Program a full display mode on an Ironlake (PCH-split) CRTC.
 *
 * Computes PLL dividers and FDI link M/N values for @adjusted_mode,
 * then writes the DPLL, LVDS, pipe timing, and plane registers in the
 * hardware-mandated order.  Returns 0 on success or a negative errno
 * (-EINVAL when no PLL configuration fits the requested clock).
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	/* Classify the output types attached to this CRTC; several of the
	 * decisions below (PLL mode, refclk, LVDS pins) depend on them. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		/* NOTE(review): has_edp_encoder may be NULL here when
		 * !is_dp; &NULL->base yields NULL since base is the first
		 * member — confirm intel_encoder_is_pch_edp() tolerates a
		 * NULL encoder. */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
			pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
			     &m_n);

	/* Pack the feedback divisors into the FP register layout */
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m < factor * clock.n)
		fp |= FP_CB_TUNE;

	/* Build up the DPLL control value: mode, multiplier, P dividers,
	 * and reference input selection. */
	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	/* NOTE(review): same possible NULL has_edp_encoder pattern as
	 * above — confirm intel_encoder_is_pch_edp() handles NULL. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!intel_crtc->no_pll) {
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			/* Program the divisors with VCO still disabled so
			 * the clocks can settle before being enabled. */
			I915_WRITE(PCH_FP0(pipe), fp);
			I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

			POSTING_READ(PCH_DPLL(pipe));
			udelay(150);
		}
	} else {
		/* This CRTC has no PLL of its own; it can only reuse an
		 * already-programmed pipe A or pipe B PLL that matches. */
		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
		    fp == I915_READ(PCH_FP0(0))) {
			intel_crtc->use_pll_a = true;
			DRM_DEBUG_KMS("using pipe a dpll\n");
		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
			   fp == I915_READ(PCH_FP0(1))) {
			intel_crtc->use_pll_a = false;
			DRM_DEBUG_KMS("using pipe b dpll\n");
		} else {
			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
			return -EINVAL;
		}
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (HAS_PCH_CPT(dev)) {
			temp &= ~PORT_TRANS_SEL_MASK;
			temp |= PORT_TRANS_SEL_CPT(pipe);
		} else {
			if (pipe == 1)
				temp |= LVDS_PIPEB_SELECT;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}

		/* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
	}
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!intel_crtc->no_pll &&
	    (!has_edp_encoder ||
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
		I915_WRITE(PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(PCH_DPLL(pipe));
		udelay(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(PCH_DPLL(pipe), dpll);
	}

	/* Program FP1 with the downclocked divisors when LVDS downclocking
	 * is possible and powersave is enabled; otherwise mirror FP0. */
	intel_crtc->lowfreq_avail = false;
	if (!intel_crtc->no_pll) {
		if (is_lvds && has_reduced_clock && i915_powersave) {
			I915_WRITE(PCH_FP1(pipe), fp2);
			intel_crtc->lowfreq_avail = true;
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
			}
		} else {
			I915_WRITE(PCH_FP1(pipe), fp);
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
			}
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACED_ILK;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		I915_WRITE(VSYNCSHIFT(pipe),
			   adjusted_mode->crtc_hsync_start
			   - adjusted_mode->crtc_htotal/2);
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		I915_WRITE(VSYNCSHIFT(pipe), 0);
	}

	/* Pipe timing registers: low 16 bits hold the active/start value,
	 * high 16 bits the total/end value, each minus one. */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	/* CPU eDP uses its own PLL, not the PCH DPLL programmed above */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
6113
6114 static int intel_crtc_mode_set(struct drm_crtc *crtc,
6115                                struct drm_display_mode *mode,
6116                                struct drm_display_mode *adjusted_mode,
6117                                int x, int y,
6118                                struct drm_framebuffer *old_fb)
6119 {
6120         struct drm_device *dev = crtc->dev;
6121         struct drm_i915_private *dev_priv = dev->dev_private;
6122         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6123         int pipe = intel_crtc->pipe;
6124         int ret;
6125
6126         drm_vblank_pre_modeset(dev, pipe);
6127
6128         ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
6129                                               x, y, old_fb);
6130         drm_vblank_post_modeset(dev, pipe);
6131
6132         if (ret)
6133                 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
6134         else
6135                 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6136
6137         return ret;
6138 }
6139
6140 static bool intel_eld_uptodate(struct drm_connector *connector,
6141                                int reg_eldv, uint32_t bits_eldv,
6142                                int reg_elda, uint32_t bits_elda,
6143                                int reg_edid)
6144 {
6145         struct drm_i915_private *dev_priv = connector->dev->dev_private;
6146         uint8_t *eld = connector->eld;
6147         uint32_t i;
6148
6149         i = I915_READ(reg_eldv);
6150         i &= bits_eldv;
6151
6152         if (!eld[0])
6153                 return !i;
6154
6155         if (!i)
6156                 return false;
6157
6158         i = I915_READ(reg_elda);
6159         i &= ~bits_elda;
6160         I915_WRITE(reg_elda, i);
6161
6162         for (i = 0; i < eld[2]; i++)
6163                 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6164                         return false;
6165
6166         return true;
6167 }
6168
/*
 * Write the cached ELD (EDID-Like Data, the display's audio capability
 * block) for @connector into the G4X audio hardware buffer.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	/* Select the ELD-valid bit based on the audio device ID */
	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Nothing to do if the hardware already holds this ELD */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Clear the valid bit and reset the write address to 0 */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* No cached ELD: leave the valid bit cleared */
	if (!eld[0])
		return;

	/* eld[2] is the ELD payload length; clamp to the hw buffer size */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD valid */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
6208
/*
 * Write the cached ELD for @connector into the PCH (IBX or CPT) audio
 * hardware buffer for the pipe driving @crtc.
 *
 * Register addresses are the pipe-A base plus a 0x100 stride per pipe.
 * Note: the variable i is reused first as the pipe number, then as the
 * decoded port select, then as the loop index.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* IBX and CPT PCHs place the audio registers at different bases */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Offset from the pipe-A registers to this CRTC's pipe */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	/* Skip the rewrite if the hardware already holds this ELD */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting it */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Reset the ELD write address to 0 */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Finally mark the new ELD valid */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
6287
6288 void intel_write_eld(struct drm_encoder *encoder,
6289                      struct drm_display_mode *mode)
6290 {
6291         struct drm_crtc *crtc = encoder->crtc;
6292         struct drm_connector *connector;
6293         struct drm_device *dev = encoder->dev;
6294         struct drm_i915_private *dev_priv = dev->dev_private;
6295
6296         connector = drm_select_eld(encoder, mode);
6297         if (!connector)
6298                 return;
6299
6300         DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6301                          connector->base.id,
6302                          drm_get_connector_name(connector),
6303                          connector->encoder->base.id,
6304                          drm_get_encoder_name(connector->encoder));
6305
6306         connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6307
6308         if (dev_priv->display.write_eld)
6309                 dev_priv->display.write_eld(connector, crtc);
6310 }
6311
6312 /** Loads the palette/gamma unit for the CRTC with the prepared values */
6313 void intel_crtc_load_lut(struct drm_crtc *crtc)
6314 {
6315         struct drm_device *dev = crtc->dev;
6316         struct drm_i915_private *dev_priv = dev->dev_private;
6317         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6318         int palreg = PALETTE(intel_crtc->pipe);
6319         int i;
6320
6321         /* The clocks have to be on to load the palette. */
6322         if (!crtc->enabled || !intel_crtc->active)
6323                 return;
6324
6325         /* use legacy palette for Ironlake */
6326         if (HAS_PCH_SPLIT(dev))
6327                 palreg = LGC_PALETTE(intel_crtc->pipe);
6328
6329         for (i = 0; i < 256; i++) {
6330                 I915_WRITE(palreg + 4 * i,
6331                            (intel_crtc->lut_r[i] << 16) |
6332                            (intel_crtc->lut_g[i] << 8) |
6333                            intel_crtc->lut_b[i]);
6334         }
6335 }
6336
6337 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6338 {
6339         struct drm_device *dev = crtc->dev;
6340         struct drm_i915_private *dev_priv = dev->dev_private;
6341         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6342         bool visible = base != 0;
6343         u32 cntl;
6344
6345         if (intel_crtc->cursor_visible == visible)
6346                 return;
6347
6348         cntl = I915_READ(_CURACNTR);
6349         if (visible) {
6350                 /* On these chipsets we can only modify the base whilst
6351                  * the cursor is disabled.
6352                  */
6353                 I915_WRITE(_CURABASE, base);
6354
6355                 cntl &= ~(CURSOR_FORMAT_MASK);
6356                 /* XXX width must be 64, stride 256 => 0x00 << 28 */
6357                 cntl |= CURSOR_ENABLE |
6358                         CURSOR_GAMMA_ENABLE |
6359                         CURSOR_FORMAT_ARGB;
6360         } else
6361                 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
6362         I915_WRITE(_CURACNTR, cntl);
6363
6364         intel_crtc->cursor_visible = visible;
6365 }
6366
6367 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6368 {
6369         struct drm_device *dev = crtc->dev;
6370         struct drm_i915_private *dev_priv = dev->dev_private;
6371         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6372         int pipe = intel_crtc->pipe;
6373         bool visible = base != 0;
6374
6375         if (intel_crtc->cursor_visible != visible) {
6376                 uint32_t cntl = I915_READ(CURCNTR(pipe));
6377                 if (base) {
6378                         cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6379                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6380                         cntl |= pipe << 28; /* Connect to correct pipe */
6381                 } else {
6382                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6383                         cntl |= CURSOR_MODE_DISABLE;
6384                 }
6385                 I915_WRITE(CURCNTR(pipe), cntl);
6386
6387                 intel_crtc->cursor_visible = visible;
6388         }
6389         /* and commit changes on next vblank */
6390         I915_WRITE(CURBASE(pipe), base);
6391 }
6392
6393 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6394 {
6395         struct drm_device *dev = crtc->dev;
6396         struct drm_i915_private *dev_priv = dev->dev_private;
6397         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6398         int pipe = intel_crtc->pipe;
6399         bool visible = base != 0;
6400
6401         if (intel_crtc->cursor_visible != visible) {
6402                 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6403                 if (base) {
6404                         cntl &= ~CURSOR_MODE;
6405                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6406                 } else {
6407                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6408                         cntl |= CURSOR_MODE_DISABLE;
6409                 }
6410                 I915_WRITE(CURCNTR_IVB(pipe), cntl);
6411
6412                 intel_crtc->cursor_visible = visible;
6413         }
6414         /* and commit changes on next vblank */
6415         I915_WRITE(CURBASE_IVB(pipe), base);
6416 }
6417
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* base == 0 means "cursor hidden": force it when the cursor is
	 * switched off, the crtc has no fb, or the cursor lies beyond
	 * the right/bottom edge of the framebuffer. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* Negative coordinates are programmed as sign + magnitude; hide
	 * the cursor entirely once it is fully off the left edge. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	/* Same sign + magnitude treatment for the vertical axis. */
	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Skip the register writes if the cursor was and remains hidden. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}

	if (visible)
		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
6479
6480 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6481                                  struct drm_file *file,
6482                                  uint32_t handle,
6483                                  uint32_t width, uint32_t height)
6484 {
6485         struct drm_device *dev = crtc->dev;
6486         struct drm_i915_private *dev_priv = dev->dev_private;
6487         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6488         struct drm_i915_gem_object *obj;
6489         uint32_t addr;
6490         int ret;
6491
6492         DRM_DEBUG_KMS("\n");
6493
6494         /* if we want to turn off the cursor ignore width and height */
6495         if (!handle) {
6496                 DRM_DEBUG_KMS("cursor off\n");
6497                 addr = 0;
6498                 obj = NULL;
6499                 mutex_lock(&dev->struct_mutex);
6500                 goto finish;
6501         }
6502
6503         /* Currently we only support 64x64 cursors */
6504         if (width != 64 || height != 64) {
6505                 DRM_ERROR("we currently only support 64x64 cursors\n");
6506                 return -EINVAL;
6507         }
6508
6509         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6510         if (&obj->base == NULL)
6511                 return -ENOENT;
6512
6513         if (obj->base.size < width * height * 4) {
6514                 DRM_ERROR("buffer is to small\n");
6515                 ret = -ENOMEM;
6516                 goto fail;
6517         }
6518
6519         /* we only need to pin inside GTT if cursor is non-phy */
6520         mutex_lock(&dev->struct_mutex);
6521         if (!dev_priv->info->cursor_needs_physical) {
6522                 if (obj->tiling_mode) {
6523                         DRM_ERROR("cursor cannot be tiled\n");
6524                         ret = -EINVAL;
6525                         goto fail_locked;
6526                 }
6527
6528                 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
6529                 if (ret) {
6530                         DRM_ERROR("failed to move cursor bo into the GTT\n");
6531                         goto fail_locked;
6532                 }
6533
6534                 ret = i915_gem_object_put_fence(obj);
6535                 if (ret) {
6536                         DRM_ERROR("failed to release fence for cursor");
6537                         goto fail_unpin;
6538                 }
6539
6540                 addr = obj->gtt_offset;
6541         } else {
6542                 int align = IS_I830(dev) ? 16 * 1024 : 256;
6543                 ret = i915_gem_attach_phys_object(dev, obj,
6544                                                   (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6545                                                   align);
6546                 if (ret) {
6547                         DRM_ERROR("failed to attach phys object\n");
6548                         goto fail_locked;
6549                 }
6550                 addr = obj->phys_obj->handle->busaddr;
6551         }
6552
6553         if (IS_GEN2(dev))
6554                 I915_WRITE(CURSIZE, (height << 12) | width);
6555
6556  finish:
6557         if (intel_crtc->cursor_bo) {
6558                 if (dev_priv->info->cursor_needs_physical) {
6559                         if (intel_crtc->cursor_bo != obj)
6560                                 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6561                 } else
6562                         i915_gem_object_unpin(intel_crtc->cursor_bo);
6563                 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6564         }
6565
6566         mutex_unlock(&dev->struct_mutex);
6567
6568         intel_crtc->cursor_addr = addr;
6569         intel_crtc->cursor_bo = obj;
6570         intel_crtc->cursor_width = width;
6571         intel_crtc->cursor_height = height;
6572
6573         intel_crtc_update_cursor(crtc, true);
6574
6575         return 0;
6576 fail_unpin:
6577         i915_gem_object_unpin(obj);
6578 fail_locked:
6579         mutex_unlock(&dev->struct_mutex);
6580 fail:
6581         drm_gem_object_unreference_unlocked(&obj->base);
6582         return ret;
6583 }
6584
6585 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6586 {
6587         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6588
6589         intel_crtc->cursor_x = x;
6590         intel_crtc->cursor_y = y;
6591
6592         intel_crtc_update_cursor(crtc, true);
6593
6594         return 0;
6595 }
6596
6597 /** Sets the color ramps on behalf of RandR */
6598 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6599                                  u16 blue, int regno)
6600 {
6601         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6602
6603         intel_crtc->lut_r[regno] = red >> 8;
6604         intel_crtc->lut_g[regno] = green >> 8;
6605         intel_crtc->lut_b[regno] = blue >> 8;
6606 }
6607
6608 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6609                              u16 *blue, int regno)
6610 {
6611         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6612
6613         *red = intel_crtc->lut_r[regno] << 8;
6614         *green = intel_crtc->lut_g[regno] << 8;
6615         *blue = intel_crtc->lut_b[regno] << 8;
6616 }
6617
6618 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6619                                  u16 *blue, uint32_t start, uint32_t size)
6620 {
6621         int end = (start + size > 256) ? 256 : start + size, i;
6622         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6623
6624         for (i = start; i < end; i++) {
6625                 intel_crtc->lut_r[i] = red[i] >> 8;
6626                 intel_crtc->lut_g[i] = green[i] >> 8;
6627                 intel_crtc->lut_b[i] = blue[i] >> 8;
6628         }
6629
6630         intel_crtc_load_lut(crtc);
6631 }
6632
6633 /**
6634  * Get a pipe with a simple mode set on it for doing load-based monitor
6635  * detection.
6636  *
6637  * It will be up to the load-detect code to adjust the pipe as appropriate for
6638  * its requirements.  The pipe will be connected to no other encoders.
6639  *
6640  * Currently this code will only succeed if there is a pipe with no encoders
6641  * configured for it.  In the future, it could choose to temporarily disable
6642  * some outputs to free up a pipe for its use.
6643  *
6644  * \return crtc, or NULL if no pipes are available.
6645  */
6646
/* VESA 640x480x72Hz mode to set on the pipe */
/* NOTE(review): assumed to be a safe timing for any probe-capable sink;
 * used by intel_get_load_detect_pipe() when no mode is supplied. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6652
6653 static struct drm_framebuffer *
6654 intel_framebuffer_create(struct drm_device *dev,
6655                          struct drm_mode_fb_cmd2 *mode_cmd,
6656                          struct drm_i915_gem_object *obj)
6657 {
6658         struct intel_framebuffer *intel_fb;
6659         int ret;
6660
6661         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6662         if (!intel_fb) {
6663                 drm_gem_object_unreference_unlocked(&obj->base);
6664                 return ERR_PTR(-ENOMEM);
6665         }
6666
6667         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6668         if (ret) {
6669                 drm_gem_object_unreference_unlocked(&obj->base);
6670                 kfree(intel_fb);
6671                 return ERR_PTR(ret);
6672         }
6673
6674         return &intel_fb->base;
6675 }
6676
6677 static u32
6678 intel_framebuffer_pitch_for_width(int width, int bpp)
6679 {
6680         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6681         return ALIGN(pitch, 64);
6682 }
6683
6684 static u32
6685 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6686 {
6687         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6688         return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6689 }
6690
6691 static struct drm_framebuffer *
6692 intel_framebuffer_create_for_mode(struct drm_device *dev,
6693                                   struct drm_display_mode *mode,
6694                                   int depth, int bpp)
6695 {
6696         struct drm_i915_gem_object *obj;
6697         struct drm_mode_fb_cmd2 mode_cmd;
6698
6699         obj = i915_gem_alloc_object(dev,
6700                                     intel_framebuffer_size_for_mode(mode, bpp));
6701         if (obj == NULL)
6702                 return ERR_PTR(-ENOMEM);
6703
6704         mode_cmd.width = mode->hdisplay;
6705         mode_cmd.height = mode->vdisplay;
6706         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6707                                                                 bpp);
6708         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
6709
6710         return intel_framebuffer_create(dev, &mode_cmd, obj);
6711 }
6712
6713 static struct drm_framebuffer *
6714 mode_fits_in_fbdev(struct drm_device *dev,
6715                    struct drm_display_mode *mode)
6716 {
6717         struct drm_i915_private *dev_priv = dev->dev_private;
6718         struct drm_i915_gem_object *obj;
6719         struct drm_framebuffer *fb;
6720
6721         if (dev_priv->fbdev == NULL)
6722                 return NULL;
6723
6724         obj = dev_priv->fbdev->ifb.obj;
6725         if (obj == NULL)
6726                 return NULL;
6727
6728         fb = &dev_priv->fbdev->ifb.base;
6729         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6730                                                                fb->bits_per_pixel))
6731                 return NULL;
6732
6733         if (obj->base.size < mode->vdisplay * fb->pitches[0])
6734                 return NULL;
6735
6736         return fb;
6737 }
6738
/* Returns true when a pipe is running for @connector; the state needed
 * to undo any temporary modeset is saved in @old for
 * intel_release_load_detect_pipe(). */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		/* Borrowed pipe: only the DPMS state needs restoring. */
		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* i tracks the crtc index for the possible_crtcs bitmask. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	/* Temporary pipe: mark it so release tears it down again. */
	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	/* Fall back to the standard 640x480 probe mode. */
	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* Remember to destroy this temporary fb on release. */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	/* Catches the ERR_PTR from intel_framebuffer_create_for_mode. */
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
6854
/* Undo intel_get_load_detect_pipe(): shut down a temporarily enabled
 * pipe, or restore the previous DPMS state on a borrowed one. */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/* Temporary pipe: unhook the connector, let the helper disable
	 * the now-unused crtc, and free any framebuffer we created. */
	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
6885
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Read whichever FP divisor register the DPLL currently selects
	 * (FP1 is the downclocked set, see intel_decrease_pllclock). */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* NOTE(review): N is decoded with ffs()-1 here, while P1
		 * below uses plain ffs() -- presumably one-hot encodings
		 * with different bases; confirm against the PRM. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is recovered from its bit position via ffs(). */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL's operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: LVDS (if enabled) is always on pipe B here. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
6972
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	/* Caller is responsible for freeing the returned mode. */
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/* The timing registers pack (end << 16) | start, with both
	 * values stored zero-based -- hence the +1 on each field. */
	mode->clock = intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
7005
7006 #define GPU_IDLE_TIMEOUT 400 /* ms */
7007
7008 /* When this timer fires, we've been idle for awhile */
7009 static void intel_gpu_idle_timer(unsigned long arg)
7010 {
7011         struct drm_device *dev = (struct drm_device *)arg;
7012         drm_i915_private_t *dev_priv = dev->dev_private;
7013
7014         if (!list_empty(&dev_priv->mm.active_list)) {
7015                 /* Still processing requests, so just re-arm the timer. */
7016                 mod_timer(&dev_priv->idle_timer, jiffies +
7017                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7018                 return;
7019         }
7020
7021         dev_priv->busy = false;
7022         queue_work(dev_priv->wq, &dev_priv->idle_work);
7023 }
7024
7025 #define CRTC_IDLE_TIMEOUT 700 /* ms */
7026
/* Per-CRTC idle timer: once the scanout bo stops being used by the GPU,
 * flag the crtc idle and kick the shared idle work. */
static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	/* NOTE(review): the NULL test below only works if crtc->fb was
	 * NULL and the drm framebuffer is the first member of
	 * intel_framebuffer (to_intel_framebuffer is a container_of) --
	 * confirm against intel_drv.h. */
	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU. */
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
7045
/* Switch LVDS back to the full (non-downclocked) dot clock, then arm
 * the idle timer that will downclock it again when the display idles. */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	/* This legacy DPLL reclocking does not apply to PCH-split parts. */
	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Sanity check: panel registers should be unlocked before
		 * touching the rate select. */
		assert_panel_unlocked(dev_priv, pipe);

		/* Clearing FPA1 selects the FP0 (full-rate) divisors. */
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to confirm the bit actually cleared. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
7080
/*
 * Switch the LVDS panel to its reduced (FPA1) refresh rate to save power
 * while the CRTC is idle.  Counterpart of intel_increase_pllclock();
 * invoked from intel_idle_update() under struct_mutex.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        if (HAS_PCH_SPLIT(dev))
                return;

        if (!dev_priv->lvds_downclock_avail)
                return;

        /*
         * Since this is called by a timer, we should never get here in
         * the manual case.
         */
        if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
                int pipe = intel_crtc->pipe;
                int dpll_reg = DPLL(pipe);
                u32 dpll;

                DRM_DEBUG_DRIVER("downclocking LVDS\n");

                /* The DPLL can only be reprogrammed while unlocked. */
                assert_panel_unlocked(dev_priv, pipe);

                dpll = I915_READ(dpll_reg);
                dpll |= DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
                /* Wait a vblank, then read back to confirm the switch. */
                intel_wait_for_vblank(dev, pipe);
                dpll = I915_READ(dpll_reg);
                if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
                        DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
        }
}
7115
7116 /**
7117  * intel_idle_update - adjust clocks for idleness
7118  * @work: work struct
7119  *
7120  * Either the GPU or display (or both) went idle.  Check the busy status
7121  * here and adjust the CRTC and GPU clocks as necessary.
7122  */
7123 static void intel_idle_update(struct work_struct *work)
7124 {
7125         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
7126                                                     idle_work);
7127         struct drm_device *dev = dev_priv->dev;
7128         struct drm_crtc *crtc;
7129         struct intel_crtc *intel_crtc;
7130
7131         if (!i915_powersave)
7132                 return;
7133
7134         mutex_lock(&dev->struct_mutex);
7135
7136         i915_update_gfx_val(dev_priv);
7137
7138         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7139                 /* Skip inactive CRTCs */
7140                 if (!crtc->fb)
7141                         continue;
7142
7143                 intel_crtc = to_intel_crtc(crtc);
7144                 if (!intel_crtc->busy)
7145                         intel_decrease_pllclock(crtc);
7146         }
7147
7148
7149         mutex_unlock(&dev->struct_mutex);
7150 }
7151
7152 /**
7153  * intel_mark_busy - mark the GPU and possibly the display busy
7154  * @dev: drm device
7155  * @obj: object we're operating on
7156  *
7157  * Callers can use this function to indicate that the GPU is busy processing
7158  * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
7159  * buffer), we'll also mark the display as busy, so we know to increase its
7160  * clock frequency.
7161  */
7162 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7163 {
7164         drm_i915_private_t *dev_priv = dev->dev_private;
7165         struct drm_crtc *crtc = NULL;
7166         struct intel_framebuffer *intel_fb;
7167         struct intel_crtc *intel_crtc;
7168
7169         if (!drm_core_check_feature(dev, DRIVER_MODESET))
7170                 return;
7171
7172         if (!dev_priv->busy)
7173                 dev_priv->busy = true;
7174         else
7175                 mod_timer(&dev_priv->idle_timer, jiffies +
7176                           msecs_to_jiffies(GPU_IDLE_TIMEOUT));
7177
7178         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7179                 if (!crtc->fb)
7180                         continue;
7181
7182                 intel_crtc = to_intel_crtc(crtc);
7183                 intel_fb = to_intel_framebuffer(crtc->fb);
7184                 if (intel_fb->obj == obj) {
7185                         if (!intel_crtc->busy) {
7186                                 /* Non-busy -> busy, upclock */
7187                                 intel_increase_pllclock(crtc);
7188                                 intel_crtc->busy = true;
7189                         } else {
7190                                 /* Busy -> busy, put off timer */
7191                                 mod_timer(&intel_crtc->idle_timer, jiffies +
7192                                           msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
7193                         }
7194                 }
7195         }
7196 }
7197
7198 static void intel_crtc_destroy(struct drm_crtc *crtc)
7199 {
7200         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7201         struct drm_device *dev = crtc->dev;
7202         struct intel_unpin_work *work;
7203         unsigned long flags;
7204
7205         spin_lock_irqsave(&dev->event_lock, flags);
7206         work = intel_crtc->unpin_work;
7207         intel_crtc->unpin_work = NULL;
7208         spin_unlock_irqrestore(&dev->event_lock, flags);
7209
7210         if (work) {
7211                 cancel_work_sync(&work->work);
7212                 kfree(work);
7213         }
7214
7215         drm_crtc_cleanup(crtc);
7216
7217         kfree(intel_crtc);
7218 }
7219
7220 static void intel_unpin_work_fn(struct work_struct *__work)
7221 {
7222         struct intel_unpin_work *work =
7223                 container_of(__work, struct intel_unpin_work, work);
7224
7225         mutex_lock(&work->dev->struct_mutex);
7226         intel_unpin_fb_obj(work->old_fb_obj);
7227         drm_gem_object_unreference(&work->pending_flip_obj->base);
7228         drm_gem_object_unreference(&work->old_fb_obj->base);
7229
7230         intel_update_fbc(work->dev);
7231         mutex_unlock(&work->dev->struct_mutex);
7232         kfree(work);
7233 }
7234
/*
 * Complete a pending page flip on @crtc from the vblank interrupt path:
 * deliver the userspace flip event (with a timestamp corrected for late
 * irq delivery), drop the vblank reference, clear the pending_flip bit for
 * this plane and schedule intel_unpin_work_fn() to unpin the old buffer.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
                                      struct drm_crtc *crtc)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        struct drm_i915_gem_object *obj;
        struct drm_pending_vblank_event *e;
        struct timeval tnow, tvbl;
        unsigned long flags;

        /* Ignore early vblank irqs */
        if (intel_crtc == NULL)
                return;

        do_gettimeofday(&tnow);

        /* unpin_work is protected by the event lock. */
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
        if (work == NULL || !work->pending) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }

        intel_crtc->unpin_work = NULL;

        if (work->event) {
                e = work->event;
                e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);

                /* Called before vblank count and timestamps have
                 * been updated for the vblank interval of flip
                 * completion? Need to increment vblank count and
                 * add one videorefresh duration to returned timestamp
                 * to account for this. We assume this happened if we
                 * get called over 0.9 frame durations after the last
                 * timestamped vblank.
                 *
                 * This calculation can not be used with vrefresh rates
                 * below 5Hz (10Hz to be on the safe side) without
                 * promoting to 64 integers.
                 */
                if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
                    9 * crtc->framedur_ns) {
                        e->event.sequence++;
                        tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
                                             crtc->framedur_ns);
                }

                e->event.tv_sec = tvbl.tv_sec;
                e->event.tv_usec = tvbl.tv_usec;

                /* Hand the event to the file's queue and wake waiters. */
                list_add_tail(&e->base.link,
                              &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
        }

        drm_vblank_put(dev, intel_crtc->pipe);

        spin_unlock_irqrestore(&dev->event_lock, flags);

        obj = work->old_fb_obj;

        /* The old buffer is no longer pending a flip on this plane. */
        atomic_clear_mask(1 << intel_crtc->plane,
                          &obj->pending_flip.counter);
        if (atomic_read(&obj->pending_flip) == 0)
                wake_up(&dev_priv->pending_flip_queue);

        /* Unpinning needs struct_mutex: defer to process context. */
        schedule_work(&work->work);

        trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
7307
7308 void intel_finish_page_flip(struct drm_device *dev, int pipe)
7309 {
7310         drm_i915_private_t *dev_priv = dev->dev_private;
7311         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7312
7313         do_intel_finish_page_flip(dev, crtc);
7314 }
7315
7316 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7317 {
7318         drm_i915_private_t *dev_priv = dev->dev_private;
7319         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7320
7321         do_intel_finish_page_flip(dev, crtc);
7322 }
7323
/*
 * Mark the queued flip work on @plane's CRTC as pending so the next call
 * to do_intel_finish_page_flip() will complete it.  Logs if a flip is
 * prepared twice or with no work queued.  unpin_work is protected by the
 * event lock.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                if ((++intel_crtc->unpin_work->pending) > 1)
                        DRM_ERROR("Prepared flip multiple times\n");
        } else {
                DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
}
7340
/*
 * Queue a gen2 page flip: pin/fence @obj and emit MI_DISPLAY_FLIP on the
 * legacy ring, preceded by a wait for any previous flip on this plane.
 * Returns 0 on success or a negative errno; caller holds struct_mutex.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long offset;
        u32 flip_mask;
        int ret;

        ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
        if (ret)
                goto out;

        /* Offset into the new buffer for cases of shared fbs between CRTCs */
        offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

        /* 6 dwords: wait, noop, flip cmd, pitch, base, aux base. */
        ret = BEGIN_LP_RING(6);
        if (ret)
                goto out;

        /* Can't queue multiple flips, so wait for the previous
         * one to finish before executing the next.
         */
        if (intel_crtc->plane)
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
        OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(0); /* aux display base address, unused */
        ADVANCE_LP_RING();
out:
        return ret;
}
7381
/*
 * Queue a gen3 page flip.  Same scheme as gen2 but using the
 * MI_DISPLAY_FLIP_I915 command variant with a trailing MI_NOOP instead of
 * an aux base dword.  Returns 0 on success or a negative errno.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long offset;
        u32 flip_mask;
        int ret;

        ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
        if (ret)
                goto out;

        /* Offset into the new buffer for cases of shared fbs between CRTCs */
        offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

        ret = BEGIN_LP_RING(6);
        if (ret)
                goto out;

        /* Serialise against any previous flip on this plane. */
        if (intel_crtc->plane)
                flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
                flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
        OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
        OUT_RING(MI_NOOP);
        OUT_RING(MI_DISPLAY_FLIP_I915 |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset + offset);
        OUT_RING(MI_NOOP);

        ADVANCE_LP_RING();
out:
        return ret;
}
7420
/*
 * Queue an i965+ page flip: only the base address (with tiling bit) needs
 * reprogramming, plus the panel-fitter/pipe-source dword.  Returns 0 on
 * success or a negative errno.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;

        ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
        if (ret)
                goto out;

        ret = BEGIN_LP_RING(4);
        if (ret)
                goto out;

        /* i965+ uses the linear or tiled offsets from the
         * Display Registers (which do not change across a page-flip)
         * so we need only reprogram the base address.
         */
        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitches[0]);
        OUT_RING(obj->gtt_offset | obj->tiling_mode);

        /* XXX Enabling the panel-fitter across page-flip is so far
         * untested on non-native modes, so ignore it for now.
         * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        OUT_RING(pf | pipesrc);
        ADVANCE_LP_RING();
out:
        return ret;
}
7459
/*
 * Queue a gen6 page flip.  Like gen4 but the tiling mode is carried in the
 * pitch dword rather than the base-address dword.  Returns 0 on success or
 * a negative errno.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
                                 struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;

        ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
        if (ret)
                goto out;

        ret = BEGIN_LP_RING(4);
        if (ret)
                goto out;

        OUT_RING(MI_DISPLAY_FLIP |
                 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        OUT_RING(fb->pitches[0] | obj->tiling_mode);
        OUT_RING(obj->gtt_offset);

        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
         * flipping with a non-native mode, and worse causes a normal
         * modeset to fail.
         * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
         */
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        OUT_RING(pf | pipesrc);
        ADVANCE_LP_RING();
out:
        return ret;
}
7496
7497 /*
7498  * On gen7 we currently use the blit ring because (in early silicon at least)
7499  * the render ring doesn't give us interrpts for page flip completion, which
7500  * means clients will hang after the first flip is queued.  Fortunately the
7501  * blit ring generates interrupts properly, so use it instead.
7502  */
7503 static int intel_gen7_queue_flip(struct drm_device *dev,
7504                                  struct drm_crtc *crtc,
7505                                  struct drm_framebuffer *fb,
7506                                  struct drm_i915_gem_object *obj)
7507 {
7508         struct drm_i915_private *dev_priv = dev->dev_private;
7509         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7510         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
7511         int ret;
7512
7513         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7514         if (ret)
7515                 goto out;
7516
7517         ret = intel_ring_begin(ring, 4);
7518         if (ret)
7519                 goto out;
7520
7521         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7522         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7523         intel_ring_emit(ring, (obj->gtt_offset));
7524         intel_ring_emit(ring, (MI_NOOP));
7525         intel_ring_advance(ring);
7526 out:
7527         return ret;
7528 }
7529
/* Fallback queue_flip hook for platforms without flip support. */
static int intel_default_queue_flip(struct drm_device *dev,
                                    struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_i915_gem_object *obj)
{
        return -ENODEV;
}
7537
/*
 * drm_crtc_funcs.page_flip: queue an asynchronous flip of @crtc to @fb.
 * If @event is non-NULL it is delivered to userspace when the flip
 * completes (see do_intel_finish_page_flip()).  Returns 0 on success,
 * -EBUSY if a flip is already pending on this CRTC, or a negative errno
 * from vblank/ring setup.  References on the old and new fb objects are
 * held until intel_unpin_work_fn() runs.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags;
        int ret;

        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        work->event = event;
        work->dev = crtc->dev;
        intel_fb = to_intel_framebuffer(crtc->fb);
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);

        /* Hold a vblank reference until the flip completes. */
        ret = drm_vblank_get(dev, intel_crtc->pipe);
        if (ret)
                goto free_work;

        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                kfree(work);
                drm_vblank_put(dev, intel_crtc->pipe);

                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                return -EBUSY;
        }
        intel_crtc->unpin_work = work;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        mutex_lock(&dev->struct_mutex);

        /* Reference the objects for the scheduled work. */
        drm_gem_object_reference(&work->old_fb_obj->base);
        drm_gem_object_reference(&obj->base);

        crtc->fb = fb;

        work->pending_flip_obj = obj;

        work->enable_stall_check = true;

        /* Block clients from rendering to the new back buffer until
         * the flip occurs and the object is no longer visible.
         */
        atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

        /* Platform-specific ring emission (intel_gen*_queue_flip). */
        ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
        if (ret)
                goto cleanup_pending;

        intel_disable_fbc(dev);
        intel_mark_busy(dev, intel_fb->obj);
        mutex_unlock(&dev->struct_mutex);

        trace_i915_flip_request(intel_crtc->plane, obj);

        return 0;

cleanup_pending:
        /* Unwind in reverse order of the setup above. */
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        spin_lock_irqsave(&dev->event_lock, flags);
        intel_crtc->unpin_work = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        drm_vblank_put(dev, intel_crtc->pipe);
free_work:
        kfree(work);

        return ret;
}
7626
7627 static void intel_sanitize_modesetting(struct drm_device *dev,
7628                                        int pipe, int plane)
7629 {
7630         struct drm_i915_private *dev_priv = dev->dev_private;
7631         u32 reg, val;
7632
7633         /* Clear any frame start delays used for debugging left by the BIOS */
7634         for_each_pipe(pipe) {
7635                 reg = PIPECONF(pipe);
7636                 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
7637         }
7638
7639         if (HAS_PCH_SPLIT(dev))
7640                 return;
7641
7642         /* Who knows what state these registers were left in by the BIOS or
7643          * grub?
7644          *
7645          * If we leave the registers in a conflicting state (e.g. with the
7646          * display plane reading from the other pipe than the one we intend
7647          * to use) then when we attempt to teardown the active mode, we will
7648          * not disable the pipes and planes in the correct order -- leaving
7649          * a plane reading from a disabled pipe and possibly leading to
7650          * undefined behaviour.
7651          */
7652
7653         reg = DSPCNTR(plane);
7654         val = I915_READ(reg);
7655
7656         if ((val & DISPLAY_PLANE_ENABLE) == 0)
7657                 return;
7658         if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
7659                 return;
7660
7661         /* This display plane is active and attached to the other CPU pipe. */
7662         pipe = !pipe;
7663
7664         /* Disable the plane and wait for it to stop reading from the pipe. */
7665         intel_disable_plane(dev_priv, plane, pipe);
7666         intel_disable_pipe(dev_priv, pipe);
7667 }
7668
7669 static void intel_crtc_reset(struct drm_crtc *crtc)
7670 {
7671         struct drm_device *dev = crtc->dev;
7672         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7673
7674         /* Reset flags back to the 'unknown' status so that they
7675          * will be correctly set on the initial modeset.
7676          */
7677         intel_crtc->dpms_mode = -1;
7678
7679         /* We need to fix up any BIOS configuration that conflicts with
7680          * our expectations.
7681          */
7682         intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
7683 }
7684
/*
 * CRTC helper vtable shared by all CRTCs.  Deliberately not const:
 * .prepare and .commit are filled in per-platform by intel_crtc_init().
 */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
        .dpms = intel_crtc_dpms,
        .mode_fixup = intel_crtc_mode_fixup,
        .mode_set = intel_crtc_mode_set,
        .mode_set_base = intel_pipe_set_base,
        .mode_set_base_atomic = intel_pipe_set_base_atomic,
        .load_lut = intel_crtc_load_lut,
        .disable = intel_crtc_disable,
};
7694
/* DRM core CRTC vtable: entry points for userspace-driven operations. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
        .reset = intel_crtc_reset,
        .cursor_set = intel_crtc_cursor_set,
        .cursor_move = intel_crtc_cursor_move,
        .gamma_set = intel_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .destroy = intel_crtc_destroy,
        .page_flip = intel_crtc_page_flip,
};
7704
/*
 * Allocate and register the intel_crtc for @pipe: identity gamma LUT,
 * pipe/plane assignment (swapped on mobile gen3 for FBC), lookup-table
 * registration, per-platform helper hooks and the idle timer.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
        int i;

        /* Trailing storage for the connector array used by fbdev. */
        intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
        if (intel_crtc == NULL)
                return;

        drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

        /* Start with an identity gamma ramp. */
        drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
        for (i = 0; i < 256; i++) {
                intel_crtc->lut_r[i] = i;
                intel_crtc->lut_g[i] = i;
                intel_crtc->lut_b[i] = i;
        }

        /* Swap pipes & planes for FBC on pre-965 */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
        if (IS_MOBILE(dev) && IS_GEN3(dev)) {
                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
                intel_crtc->plane = !pipe;
        }

        /* Each plane/pipe slot must be claimed exactly once. */
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
               dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
        dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

        intel_crtc_reset(&intel_crtc->base);
        intel_crtc->active = true; /* force the pipe off on setup_init_config */
        intel_crtc->bpp = 24; /* default for pre-Ironlake */

        /* Per-platform prepare/commit hooks in the shared helper vtable. */
        if (HAS_PCH_SPLIT(dev)) {
                if (pipe == 2 && IS_IVYBRIDGE(dev))
                        intel_crtc->no_pll = true;
                intel_helper_funcs.prepare = ironlake_crtc_prepare;
                intel_helper_funcs.commit = ironlake_crtc_commit;
        } else {
                intel_helper_funcs.prepare = i9xx_crtc_prepare;
                intel_helper_funcs.commit = i9xx_crtc_commit;
        }

        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

        intel_crtc->busy = false;

        /* Downclock timer; fires CRTC_IDLE_TIMEOUT ms after last activity. */
        setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
                    (unsigned long)intel_crtc);
}
7758
7759 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7760                                 struct drm_file *file)
7761 {
7762         drm_i915_private_t *dev_priv = dev->dev_private;
7763         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7764         struct drm_mode_object *drmmode_obj;
7765         struct intel_crtc *crtc;
7766
7767         if (!dev_priv) {
7768                 DRM_ERROR("called with no initialization\n");
7769                 return -EINVAL;
7770         }
7771
7772         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7773                         DRM_MODE_OBJECT_CRTC);
7774
7775         if (!drmmode_obj) {
7776                 DRM_ERROR("no such CRTC id\n");
7777                 return -EINVAL;
7778         }
7779
7780         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7781         pipe_from_crtc_id->pipe = crtc->pipe;
7782
7783         return 0;
7784 }
7785
7786 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
7787 {
7788         struct intel_encoder *encoder;
7789         int index_mask = 0;
7790         int entry = 0;
7791
7792         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7793                 if (type_mask & encoder->clone_mask)
7794                         index_mask |= (1 << entry);
7795                 entry++;
7796         }
7797
7798         return index_mask;
7799 }
7800
7801 static bool has_edp_a(struct drm_device *dev)
7802 {
7803         struct drm_i915_private *dev_priv = dev->dev_private;
7804
7805         if (!IS_MOBILE(dev))
7806                 return false;
7807
7808         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7809                 return false;
7810
7811         if (IS_GEN5(dev) &&
7812             (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7813                 return false;
7814
7815         return true;
7816 }
7817
/*
 * Probe and register every display output (LVDS, eDP, CRT, SDVO, HDMI,
 * DP, DVO, TV) present on this platform, fill in per-encoder possible
 * crtcs/clones, then disable everything unused before KMS takes over.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;
        bool has_lvds;

        has_lvds = intel_lvds_init(dev);
        if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
                /* disable the panel fitter on everything but LVDS */
                I915_WRITE(PFIT_CONTROL, 0);
        }

        /* eDP (ports A and, when strapped so, D) is registered before CRT. */
        if (HAS_PCH_SPLIT(dev)) {
                dpd_is_edp = intel_dpd_is_edp(dev);

                if (has_edp_a(dev))
                        intel_dp_init(dev, DP_A);

                if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                        intel_dp_init(dev, PCH_DP_D);
        }

        intel_crt_init(dev);

        if (HAS_PCH_SPLIT(dev)) {
                int found;

                if (I915_READ(HDMIB) & PORT_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev, PCH_SDVOB);
                        if (!found)
                                intel_hdmi_init(dev, HDMIB);
                        if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
                                intel_dp_init(dev, PCH_DP_B);
                }

                if (I915_READ(HDMIC) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMIC);

                if (I915_READ(HDMID) & PORT_DETECTED)
                        intel_hdmi_init(dev, HDMID);

                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_C);

                /* Port D was already registered above if it is eDP. */
                if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                        intel_dp_init(dev, PCH_DP_D);

        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
                bool found = false;

                /* SDVOB shares its pins with HDMI-B and DP-B; try in order. */
                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOB\n");
                        found = intel_sdvo_init(dev, SDVOB);
                        if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
                                intel_hdmi_init(dev, SDVOB);
                        }

                        if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
                                DRM_DEBUG_KMS("probing DP_B\n");
                                intel_dp_init(dev, DP_B);
                        }
                }

                /* Before G4X SDVOC doesn't have its own detect register */

                if (I915_READ(SDVOB) & SDVO_DETECTED) {
                        DRM_DEBUG_KMS("probing SDVOC\n");
                        found = intel_sdvo_init(dev, SDVOC);
                }

                if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

                        if (SUPPORTS_INTEGRATED_HDMI(dev)) {
                                DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
                                intel_hdmi_init(dev, SDVOC);
                        }
                        if (SUPPORTS_INTEGRATED_DP(dev)) {
                                DRM_DEBUG_KMS("probing DP_C\n");
                                intel_dp_init(dev, DP_C);
                        }
                }

                if (SUPPORTS_INTEGRATED_DP(dev) &&
                    (I915_READ(DP_D) & DP_DETECTED)) {
                        DRM_DEBUG_KMS("probing DP_D\n");
                        intel_dp_init(dev, DP_D);
                }
        } else if (IS_GEN2(dev))
                intel_dvo_init(dev);

        if (SUPPORTS_TV(dev))
                intel_tv_init(dev);

        /* Now that all encoders exist, compute crtc/clone compatibility. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                encoder->base.possible_crtcs = encoder->crtc_mask;
                encoder->base.possible_clones =
                        intel_encoder_clones(dev, encoder->clone_mask);
        }

        /* disable all the possible outputs/crtcs before entering KMS mode */
        drm_helper_disable_unused_functions(dev);

        if (HAS_PCH_SPLIT(dev))
                ironlake_init_pch_refclk(dev);
}
7926
7927 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
7928 {
7929         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7930
7931         drm_framebuffer_cleanup(fb);
7932         drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
7933
7934         kfree(intel_fb);
7935 }
7936
7937 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7938                                                 struct drm_file *file,
7939                                                 unsigned int *handle)
7940 {
7941         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7942         struct drm_i915_gem_object *obj = intel_fb->obj;
7943
7944         return drm_gem_handle_create(file, &obj->base, handle);
7945 }
7946
/* Framebuffer vtable shared by all intel framebuffers; installed by
 * drm_framebuffer_init() in intel_framebuffer_init() below. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
7951
7952 int intel_framebuffer_init(struct drm_device *dev,
7953                            struct intel_framebuffer *intel_fb,
7954                            struct drm_mode_fb_cmd2 *mode_cmd,
7955                            struct drm_i915_gem_object *obj)
7956 {
7957         int ret;
7958
7959         if (obj->tiling_mode == I915_TILING_Y)
7960                 return -EINVAL;
7961
7962         if (mode_cmd->pitches[0] & 63)
7963                 return -EINVAL;
7964
7965         switch (mode_cmd->pixel_format) {
7966         case DRM_FORMAT_RGB332:
7967         case DRM_FORMAT_RGB565:
7968         case DRM_FORMAT_XRGB8888:
7969         case DRM_FORMAT_XBGR8888:
7970         case DRM_FORMAT_ARGB8888:
7971         case DRM_FORMAT_XRGB2101010:
7972         case DRM_FORMAT_ARGB2101010:
7973                 /* RGB formats are common across chipsets */
7974                 break;
7975         case DRM_FORMAT_YUYV:
7976         case DRM_FORMAT_UYVY:
7977         case DRM_FORMAT_YVYU:
7978         case DRM_FORMAT_VYUY:
7979                 break;
7980         default:
7981                 DRM_DEBUG_KMS("unsupported pixel format %u\n",
7982                                 mode_cmd->pixel_format);
7983                 return -EINVAL;
7984         }
7985
7986         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
7987         if (ret) {
7988                 DRM_ERROR("framebuffer init failed %d\n", ret);
7989                 return ret;
7990         }
7991
7992         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
7993         intel_fb->obj = obj;
7994         return 0;
7995 }
7996
7997 static struct drm_framebuffer *
7998 intel_user_framebuffer_create(struct drm_device *dev,
7999                               struct drm_file *filp,
8000                               struct drm_mode_fb_cmd2 *mode_cmd)
8001 {
8002         struct drm_i915_gem_object *obj;
8003
8004         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8005                                                 mode_cmd->handles[0]));
8006         if (&obj->base == NULL)
8007                 return ERR_PTR(-ENOENT);
8008
8009         return intel_framebuffer_create(dev, mode_cmd, obj);
8010 }
8011
/* Mode-config hooks: userspace ADDFB requests are routed through
 * intel_user_framebuffer_create(); output-poll change notifications go
 * to intel_fb_output_poll_changed(). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
8016
8017 static struct drm_i915_gem_object *
8018 intel_alloc_context_page(struct drm_device *dev)
8019 {
8020         struct drm_i915_gem_object *ctx;
8021         int ret;
8022
8023         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8024
8025         ctx = i915_gem_alloc_object(dev, 4096);
8026         if (!ctx) {
8027                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8028                 return NULL;
8029         }
8030
8031         ret = i915_gem_object_pin(ctx, 4096, true);
8032         if (ret) {
8033                 DRM_ERROR("failed to pin power context: %d\n", ret);
8034                 goto err_unref;
8035         }
8036
8037         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8038         if (ret) {
8039                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8040                 goto err_unpin;
8041         }
8042
8043         return ctx;
8044
8045 err_unpin:
8046         i915_gem_object_unpin(ctx);
8047 err_unref:
8048         drm_gem_object_unreference(&ctx->base);
8049         mutex_unlock(&dev->struct_mutex);
8050         return NULL;
8051 }
8052
/*
 * Request a new render frequency point via the Ironlake DRPS mechanism.
 * @val: frequency index to program (between the fmin/fmax values read
 *       from MEMMODECTL in ironlake_enable_drps()).
 *
 * Returns false without touching anything if the previous command is
 * still pending in MEMSWCTL; true once the new request has been issued.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Build the change-frequency command for @val ... */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* ... then hand it to the hardware by setting the status bit. */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
8074
/*
 * Enable software-controlled dynamic render frequency (DRPS) on Ironlake:
 * program the evaluation intervals and thresholds, derive the min/max/
 * start frequency points from MEMMODECTL, switch the hardware into
 * software mode and request the starting frequency.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage for the start frequency point, from the PXVFREQ table. */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency selection over to software control. */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Record baseline counter/timestamp readings for later delta math
	 * (consumers of last_count*/last_time* are outside this chunk). */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->last_time2);
}
8138
/*
 * Undo ironlake_enable_drps(): mask and acknowledge the frequency-change
 * interrupts, return to the startup frequency point, then write the
 * status bit with no command pending to quiesce the mechanism.
 */
void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->fstart);
	msleep(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	msleep(1);

}
8159
8160 void gen6_set_rps(struct drm_device *dev, u8 val)
8161 {
8162         struct drm_i915_private *dev_priv = dev->dev_private;
8163         u32 swreq;
8164
8165         swreq = (val & 0x3ff) << 25;
8166         I915_WRITE(GEN6_RPNSWREQ, swreq);
8167 }
8168
/*
 * Tear down GEN6 RPS: cancel the software turbo request, mask and
 * disable all PM interrupts, and clear latched interrupt state so a
 * later gen6_enable_rps() starts from a clean slate.
 */
void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->rps_lock);
	dev_priv->pm_iir = 0;
	spin_unlock_irq(&dev_priv->rps_lock);

	/* Writing IIR back to itself acknowledges all pending interrupts. */
	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
8187
8188 static unsigned long intel_pxfreq(u32 vidfreq)
8189 {
8190         unsigned long freq;
8191         int div = (vidfreq & 0x3f0000) >> 16;
8192         int post = (vidfreq & 0x3000) >> 12;
8193         int pre = (vidfreq & 0x7);
8194
8195         if (!pre)
8196                 return 0;
8197
8198         freq = ((div * 133333) / ((1<<post) * pre));
8199
8200         return freq;
8201 }
8202
/*
 * Program the energy monitor (EMON): event energy weights, per-P-state
 * power weights derived from the PXVFREQ table, and assorted tuning
 * registers, then enable the monitor.  The LCFUSE correction factor is
 * cached in dev_priv->corr for later use.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight ~ vid^2 * freq, scaled down to fit in a byte. */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-wide weights into four 32-bit PXW registers. */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
8273
8274 static int intel_enable_rc6(struct drm_device *dev)
8275 {
8276         /*
8277          * Respect the kernel parameter if it is set
8278          */
8279         if (i915_enable_rc6 >= 0)
8280                 return i915_enable_rc6;
8281
8282         /*
8283          * Disable RC6 on Ironlake
8284          */
8285         if (INTEL_INFO(dev)->gen == 5)
8286                 return 0;
8287
8288         /*
8289          * Disable rc6 on Sandybridge
8290          */
8291         if (INTEL_INFO(dev)->gen == 6) {
8292                 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
8293                 return INTEL_RC6_ENABLE;
8294         }
8295         DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8296         return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8297 }
8298
/*
 * Bring up GEN6 render P-state (RPS) control and RC6 power states:
 * program the RC thresholds and evaluation intervals, enable the RC6
 * states chosen by intel_enable_rc6(), configure the turbo up/down
 * thresholds, exchange frequency limits with the PCU over the pcode
 * mailbox, and finally unmask the PM interrupts that drive the
 * frequency-scaling work item.  Takes struct_mutex and a forcewake
 * reference for the duration of the register sequence.
 */
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
	u32 pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	u8 cur_delay, min_delay, max_delay;
	int rc6_mode;
	int i;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for (i = 0; i < I915_NUM_RINGS; i++)
		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Translate the RC6 policy into control-register enable bits. */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	if (rc6_mode & INTEL_RC6p_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

	if (rc6_mode & INTEL_RC6pp_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   18 << 24 |
		   6 << 16);
	/*
	 * These thresholds found through experimentation. Making them
	 * symmetric will prevent holding the clock high when the workload is
	 * light. This allows us to improve our power usage, and lower thermals.
	 */
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 0x4000);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x4000);

	I915_WRITE(GEN6_RP_UP_EI, 100000);
	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Push an (empty) min-frequency table to the PCU via the mailbox. */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");

	I915_WRITE(GEN6_PCODE_DATA, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX,
		   GEN6_PCODE_READY |
		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

	/* Frequency limits as reported by the hardware capability register. */
	min_delay = (rp_state_cap & 0xff0000) >> 16;
	max_delay = rp_state_cap & 0xff;
	cur_delay = (gt_perf_status & 0xff00) >> 8;

	/* Check for overclock support */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
	if (pcu_mbox & (1<<31)) { /* OC supported */
		max_delay = pcu_mbox & 0xff;
		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
	}

	/* In units of 100MHz */
	dev_priv->min_delay = max(min_delay, dev_priv->min_delay);
	dev_priv->max_delay = max_delay;
	dev_priv->cur_delay = max(dev_priv->min_delay, (u8)10);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(dev_priv->cur_delay) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
		   GEN6_PM_THERMAL_EVENT |
		   GEN6_PM_RP_DOWN_TIMEOUT |
		   GEN6_PM_RP_UP_THRESHOLD |
		   GEN6_PM_RP_DOWN_THRESHOLD |
		   GEN6_PM_RP_UP_EI_EXPIRED |
		   GEN6_PM_RP_DOWN_EI_EXPIRED);
	spin_lock_irq(&dev_priv->rps_lock);
	WARN_ON(dev_priv->pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps_lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
8448
/*
 * Teach the PCU which ring frequency to pair with each GPU frequency by
 * writing one (ia_freq, gpu_freq) entry per frequency point through the
 * GEN6 pcode mailbox.  The scaling is anchored to the maximum CPU
 * frequency reported by cpufreq (falling back to tsc_khz).
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
8501
/*
 * Ironlake clock gating and workaround setup: disable the clock gates
 * that FBC and CxSR (self-refresh) require, zero the LP watermarks, and
 * apply the chicken-bit workarounds documented inline below.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Zero the LP1-LP3 watermark levels. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
8570
/*
 * Sandybridge clock gating and workaround setup: ungate the units the
 * spec requires, zero the LP watermarks, apply the self-refresh/FBC
 * chicken bits and disable per-plane trickle feed on every pipe.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Zero the LP1-LP3 watermark levels. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE  |
		   ILK_DPFD_CLK_GATE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
8632
/*
 * Ivybridge clock gating and workaround setup: zero the LP watermarks
 * and apply the Wa* (hardware workaround) chicken bits documented
 * inline, then disable per-plane trickle feed on every pipe.
 */
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Zero the LP1-LP3 watermark levels. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);

	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
			GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
8680
/*
 * G4x clock gating setup: ungate the VF/GS/CL render units and a set of
 * display/overlay units; GM45 additionally needs the DSS unit ungated.
 */
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
8698
/*
 * Crestline (965GM) clock gating: keep RCC render clock gating
 * disabled, and clear the remaining render/display/RAM gating
 * controls and the DEUC register to their default (zero) state.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
        I915_WRITE(RENCLK_GATE_D2, 0);
        I915_WRITE(DSPCLK_GATE_D, 0);
        I915_WRITE(RAMCLK_GATE_D, 0);
        /* DEUC is a 16-bit register, hence I915_WRITE16. */
        I915_WRITE16(DEUC, 0);
}
8709
/*
 * Broadwater (965G) clock gating: disable clock gating for the
 * RCZ/RCC/RCPB/ISC/FBC render units; RENCLK_GATE_D2 is left at its
 * default (all gating allowed).
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
                   I965_RCC_CLOCK_GATE_DISABLE |
                   I965_RCPB_CLOCK_GATE_DISABLE |
                   I965_ISC_CLOCK_GATE_DISABLE |
                   I965_FBC_CLOCK_GATE_DISABLE);
        I915_WRITE(RENCLK_GATE_D2, 0);
}
8721
8722 static void gen3_init_clock_gating(struct drm_device *dev)
8723 {
8724         struct drm_i915_private *dev_priv = dev->dev_private;
8725         u32 dstate = I915_READ(D_STATE);
8726
8727         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8728                 DSTATE_DOT_CLOCK_GATING;
8729         I915_WRITE(D_STATE, dstate);
8730 }
8731
/* i85x clock gating: keep the SV unit's render clock ungated. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
8738
/* i830 clock gating: keep the overlay unit's display clock ungated. */
static void i830_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
8745
/* PCH (south display) clock gating setup for Ibex Peak. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
8757
/* PCH (south display) clock gating setup for Cougar Point. */
static void cpt_init_clock_gating(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;

        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
        I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /* Without this, mode sets may fail silently on FDI */
        for_each_pipe(pipe)
                I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
8775
8776 static void ironlake_teardown_rc6(struct drm_device *dev)
8777 {
8778         struct drm_i915_private *dev_priv = dev->dev_private;
8779
8780         if (dev_priv->renderctx) {
8781                 i915_gem_object_unpin(dev_priv->renderctx);
8782                 drm_gem_object_unreference(&dev_priv->renderctx->base);
8783                 dev_priv->renderctx = NULL;
8784         }
8785
8786         if (dev_priv->pwrctx) {
8787                 i915_gem_object_unpin(dev_priv->pwrctx);
8788                 drm_gem_object_unreference(&dev_priv->pwrctx->base);
8789                 dev_priv->pwrctx = NULL;
8790         }
8791 }
8792
/*
 * Disable RC6 on Ironlake and free the context pages.  If RC6 is
 * active (PWRCTXA non-zero), the GPU is first forced out of RC6 via
 * RSTDBYCTL before the power context is torn down.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (I915_READ(PWRCTXA)) {
                /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
                I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
                /* Poll up to 50ms for the render standby unit to report ON. */
                wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
                         50);

                /* Drop the power context before re-enabling RC6 entry. */
                I915_WRITE(PWRCTXA, 0);
                POSTING_READ(PWRCTXA);

                I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
                POSTING_READ(RSTDBYCTL);
        }

        ironlake_teardown_rc6(dev);
}
8812
8813 static int ironlake_setup_rc6(struct drm_device *dev)
8814 {
8815         struct drm_i915_private *dev_priv = dev->dev_private;
8816
8817         if (dev_priv->renderctx == NULL)
8818                 dev_priv->renderctx = intel_alloc_context_page(dev);
8819         if (!dev_priv->renderctx)
8820                 return -ENOMEM;
8821
8822         if (dev_priv->pwrctx == NULL)
8823                 dev_priv->pwrctx = intel_alloc_context_page(dev);
8824         if (!dev_priv->pwrctx) {
8825                 ironlake_teardown_rc6(dev);
8826                 return -ENOMEM;
8827         }
8828
8829         return 0;
8830 }
8831
8832 void ironlake_enable_rc6(struct drm_device *dev)
8833 {
8834         struct drm_i915_private *dev_priv = dev->dev_private;
8835         int ret;
8836
8837         /* rc6 disabled by default due to repeated reports of hanging during
8838          * boot and resume.
8839          */
8840         if (!intel_enable_rc6(dev))
8841                 return;
8842
8843         mutex_lock(&dev->struct_mutex);
8844         ret = ironlake_setup_rc6(dev);
8845         if (ret) {
8846                 mutex_unlock(&dev->struct_mutex);
8847                 return;
8848         }
8849
8850         /*
8851          * GPU can automatically power down the render unit if given a page
8852          * to save state.
8853          */
8854         ret = BEGIN_LP_RING(6);
8855         if (ret) {
8856                 ironlake_teardown_rc6(dev);
8857                 mutex_unlock(&dev->struct_mutex);
8858                 return;
8859         }
8860
8861         OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8862         OUT_RING(MI_SET_CONTEXT);
8863         OUT_RING(dev_priv->renderctx->gtt_offset |
8864                  MI_MM_SPACE_GTT |
8865                  MI_SAVE_EXT_STATE_EN |
8866                  MI_RESTORE_EXT_STATE_EN |
8867                  MI_RESTORE_INHIBIT);
8868         OUT_RING(MI_SUSPEND_FLUSH);
8869         OUT_RING(MI_NOOP);
8870         OUT_RING(MI_FLUSH);
8871         ADVANCE_LP_RING();
8872
8873         /*
8874          * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8875          * does an implicit flush, combined with MI_FLUSH above, it should be
8876          * safe to assume that renderctx is valid
8877          */
8878         ret = intel_wait_ring_idle(LP_RING(dev_priv));
8879         if (ret) {
8880                 DRM_ERROR("failed to enable ironlake power power savings\n");
8881                 ironlake_teardown_rc6(dev);
8882                 mutex_unlock(&dev->struct_mutex);
8883                 return;
8884         }
8885
8886         I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8887         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8888         mutex_unlock(&dev->struct_mutex);
8889 }
8890
8891 void intel_init_clock_gating(struct drm_device *dev)
8892 {
8893         struct drm_i915_private *dev_priv = dev->dev_private;
8894
8895         dev_priv->display.init_clock_gating(dev);
8896
8897         if (dev_priv->display.init_pch_clock_gating)
8898                 dev_priv->display.init_pch_clock_gating(dev);
8899 }
8900
/*
 * Set up chip specific display functions.  Fills in the
 * dev_priv->display vtable: DPMS/modeset/plane hooks, FBC hooks, the
 * core display clock query, forcewake, watermark update, clock gating
 * and page-flip queueing functions, all selected by platform.
 */
static void intel_init_display(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* We always want a DPMS function */
        if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.dpms = ironlake_crtc_dpms;
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.update_plane = ironlake_update_plane;
        } else {
                dev_priv->display.dpms = i9xx_crtc_dpms;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.update_plane = i9xx_update_plane;
        }

        /* Framebuffer compression hooks, only where supported. */
        if (I915_HAS_FBC(dev)) {
                if (HAS_PCH_SPLIT(dev)) {
                        dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
                        dev_priv->display.enable_fbc = ironlake_enable_fbc;
                        dev_priv->display.disable_fbc = ironlake_disable_fbc;
                } else if (IS_GM45(dev)) {
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
                        dev_priv->display.disable_fbc = g4x_disable_fbc;
                } else if (IS_CRESTLINE(dev)) {
                        dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
                        dev_priv->display.enable_fbc = i8xx_enable_fbc;
                        dev_priv->display.disable_fbc = i8xx_disable_fbc;
                }
                /* 855GM needs testing */
        }

        /* Returns the core display clock speed */
        if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
                dev_priv->display.get_display_clock_speed =
                        i945_get_display_clock_speed;
        else if (IS_I915G(dev))
                dev_priv->display.get_display_clock_speed =
                        i915_get_display_clock_speed;
        else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
                dev_priv->display.get_display_clock_speed =
                        i9xx_misc_get_display_clock_speed;
        else if (IS_I915GM(dev))
                dev_priv->display.get_display_clock_speed =
                        i915gm_get_display_clock_speed;
        else if (IS_I865G(dev))
                dev_priv->display.get_display_clock_speed =
                        i865_get_display_clock_speed;
        else if (IS_I85X(dev))
                dev_priv->display.get_display_clock_speed =
                        i855_get_display_clock_speed;
        else /* 852, 830 */
                dev_priv->display.get_display_clock_speed =
                        i830_get_display_clock_speed;

        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
                dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

                /* IVB configs may use multi-threaded forcewake */
                if (IS_IVYBRIDGE(dev)) {
                        u32     ecobus;

                        /* A small trick here - if the bios hasn't configured MT forcewake,
                         * and if the device is in RC6, then force_wake_mt_get will not wake
                         * the device and the ECOBUS read will return zero. Which will be
                         * (correctly) interpreted by the test below as MT forcewake being
                         * disabled.
                         */
                        mutex_lock(&dev->struct_mutex);
                        __gen6_gt_force_wake_mt_get(dev_priv);
                        ecobus = I915_READ_NOTRACE(ECOBUS);
                        __gen6_gt_force_wake_mt_put(dev_priv);
                        mutex_unlock(&dev->struct_mutex);

                        if (ecobus & FORCEWAKE_MT_ENABLE) {
                                DRM_DEBUG_KMS("Using MT version of forcewake\n");
                                dev_priv->display.force_wake_get =
                                        __gen6_gt_force_wake_mt_get;
                                dev_priv->display.force_wake_put =
                                        __gen6_gt_force_wake_mt_put;
                        }
                }

                if (HAS_PCH_IBX(dev))
                        dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
                else if (HAS_PCH_CPT(dev))
                        dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

                if (IS_GEN5(dev)) {
                        if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                                dev_priv->display.update_wm = ironlake_update_wm;
                        else {
                                DRM_DEBUG_KMS("Failed to get proper latency. "
                                              "Disable CxSR\n");
                                dev_priv->display.update_wm = NULL;
                        }
                        dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
                        dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
                        dev_priv->display.write_eld = ironlake_write_eld;
                } else if (IS_GEN6(dev)) {
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
                                dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
                                dev_priv->display.update_wm = NULL;
                        }
                        dev_priv->display.fdi_link_train = gen6_fdi_link_train;
                        dev_priv->display.init_clock_gating = gen6_init_clock_gating;
                        dev_priv->display.write_eld = ironlake_write_eld;
                } else if (IS_IVYBRIDGE(dev)) {
                        /* FIXME: detect B0+ stepping and use auto training */
                        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
                        if (SNB_READ_WM0_LATENCY()) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
                                dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
                                DRM_DEBUG_KMS("Failed to read display plane latency. "
                                              "Disable CxSR\n");
                                dev_priv->display.update_wm = NULL;
                        }
                        dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
                        dev_priv->display.write_eld = ironlake_write_eld;
                } else
                        dev_priv->display.update_wm = NULL;
        } else if (IS_PINEVIEW(dev)) {
                /* Pineview only enables CxSR when the memory latency
                 * for the current DDR/FSB/memory-clock combination is
                 * known; otherwise self-refresh stays off for good. */
                if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
                                            dev_priv->is_ddr3,
                                            dev_priv->fsb_freq,
                                            dev_priv->mem_freq)) {
                        DRM_INFO("failed to find known CxSR latency "
                                 "(found ddr%s fsb freq %d, mem freq %d), "
                                 "disabling CxSR\n",
                                 (dev_priv->is_ddr3 == 1) ? "3" : "2",
                                 dev_priv->fsb_freq, dev_priv->mem_freq);
                        /* Disable CxSR and never update its watermark again */
                        pineview_disable_cxsr(dev);
                        dev_priv->display.update_wm = NULL;
                } else
                        dev_priv->display.update_wm = pineview_update_wm;
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        } else if (IS_G4X(dev)) {
                dev_priv->display.write_eld = g4x_write_eld;
                dev_priv->display.update_wm = g4x_update_wm;
                dev_priv->display.init_clock_gating = g4x_init_clock_gating;
        } else if (IS_GEN4(dev)) {
                dev_priv->display.update_wm = i965_update_wm;
                if (IS_CRESTLINE(dev))
                        dev_priv->display.init_clock_gating = crestline_init_clock_gating;
                else if (IS_BROADWATER(dev))
                        dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
        } else if (IS_GEN3(dev)) {
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
                dev_priv->display.init_clock_gating = gen3_init_clock_gating;
        } else if (IS_I865G(dev)) {
                dev_priv->display.update_wm = i830_update_wm;
                dev_priv->display.init_clock_gating = i85x_init_clock_gating;
                dev_priv->display.get_fifo_size = i830_get_fifo_size;
        } else if (IS_I85X(dev)) {
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i85x_get_fifo_size;
                dev_priv->display.init_clock_gating = i85x_init_clock_gating;
        } else {
                dev_priv->display.update_wm = i830_update_wm;
                dev_priv->display.init_clock_gating = i830_init_clock_gating;
                if (IS_845G(dev))
                        dev_priv->display.get_fifo_size = i845_get_fifo_size;
                else
                        dev_priv->display.get_fifo_size = i830_get_fifo_size;
        }

        /* Default just returns -ENODEV to indicate unsupported */
        dev_priv->display.queue_flip = intel_default_queue_flip;

        /* Page-flip queueing, keyed purely on GPU generation. */
        switch (INTEL_INFO(dev)->gen) {
        case 2:
                dev_priv->display.queue_flip = intel_gen2_queue_flip;
                break;

        case 3:
                dev_priv->display.queue_flip = intel_gen3_queue_flip;
                break;

        case 4:
        case 5:
                dev_priv->display.queue_flip = intel_gen4_queue_flip;
                break;

        case 6:
                dev_priv->display.queue_flip = intel_gen6_queue_flip;
                break;
        case 7:
                dev_priv->display.queue_flip = intel_gen7_queue_flip;
                break;
        }
}
9102
9103 /*
9104  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
9105  * resume, or other times.  This quirk makes sure that's the case for
9106  * affected systems.
9107  */
/* Quirk hook: mark this device as requiring pipe A to stay enabled. */
static void quirk_pipea_force(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->quirks |= QUIRK_PIPEA_FORCE;
        DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
9115
9116 /*
9117  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9118  */
/* Quirk hook: force spread-spectrum clocking off for LVDS. */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
9124
/* One quirk table entry: PCI identity to match plus the hook to run. */
struct intel_quirk {
        int device;             /* PCI device id */
        int subsystem_vendor;   /* subsystem vendor id, or PCI_ANY_ID */
        int subsystem_device;   /* subsystem device id, or PCI_ANY_ID */
        void (*hook)(struct drm_device *dev);   /* applied on match */
};
9131
/* Table of per-machine quirks, matched against the PCI identity. */
struct intel_quirk intel_quirks[] = {
        /* HP Mini needs pipe A force quirk (LP: #322104) */
        { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

        /* Thinkpad R31 needs pipe A force quirk */
        { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
        /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
        { 0x2592, 0x1179, 0x0001, quirk_pipea_force },

        /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
        { 0x3577,  0x1014, 0x0513, quirk_pipea_force },
        /* ThinkPad X40 needs pipe A force quirk */
        /* NOTE(review): no table entry follows the X40 comment above —
         * confirm whether the entry was dropped intentionally. */

        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

        /* 855 & before need to leave pipe A & dpll A up */
        { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
        { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

        /* Lenovo U160 cannot use SSC on LVDS */
        { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

        /* Sony Vaio Y cannot use SSC on LVDS */
        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
9158
9159 static void intel_init_quirks(struct drm_device *dev)
9160 {
9161         struct pci_dev *d = dev->pdev;
9162         int i;
9163
9164         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
9165                 struct intel_quirk *q = &intel_quirks[i];
9166
9167                 if (d->device == q->device &&
9168                     (d->subsystem_vendor == q->subsystem_vendor ||
9169                      q->subsystem_vendor == PCI_ANY_ID) &&
9170                     (d->subsystem_device == q->subsystem_device ||
9171                      q->subsystem_device == PCI_ANY_ID))
9172                         q->hook(dev);
9173         }
9174 }
9175
9176 /* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u8 sr1;
        u32 vga_reg;

        /* The VGA plane control register moved onto the CPU on PCH-split. */
        if (HAS_PCH_SPLIT(dev))
                vga_reg = CPU_VGACNTRL;
        else
                vga_reg = VGACNTRL;

        /* Take legacy VGA IO from the arbiter, blank the screen via
         * sequencer register SR01 bit 5 (screen off), then release it. */
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        outb(1, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
        outb(sr1 | 1<<5, VGA_SR_DATA);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
        /* Let the screen-off take effect before disabling the plane. */
        udelay(300);

        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg);
}
9198
/*
 * One-time modeset initialisation: sets up the DRM mode config,
 * applies machine quirks, fills the per-chip display vtable, creates
 * CRTCs/planes, probes outputs, applies clock gating and brings up
 * the power-saving features, and arms the GPU idle tracking.
 */
void intel_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i, ret;

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        dev->mode_config.preferred_depth = 24;
        dev->mode_config.prefer_shadow = 1;

        dev->mode_config.funcs = (void *)&intel_mode_funcs;

        /* Quirks must be known before the display vtable and outputs
         * are set up, since they change later behaviour. */
        intel_init_quirks(dev);

        intel_init_display(dev);

        /* Maximum framebuffer size grows with the GPU generation. */
        if (IS_GEN2(dev)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        } else if (IS_GEN3(dev)) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
        } else {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
        }
        dev->mode_config.fb_base = dev->agp->base;

        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

        /* One CRTC and one sprite plane per pipe; a failed sprite is
         * only logged, not fatal. */
        for (i = 0; i < dev_priv->num_pipe; i++) {
                intel_crtc_init(dev, i);
                ret = intel_plane_init(dev, i);
                if (ret)
                        DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
        }

        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);

        intel_init_clock_gating(dev);

        if (IS_IRONLAKE_M(dev)) {
                ironlake_enable_drps(dev);
                intel_init_emon(dev);
        }

        if (IS_GEN6(dev) || IS_GEN7(dev)) {
                gen6_enable_rps(dev_priv);
                gen6_update_ring_freq(dev_priv);
        }

        /* GPU idle tracking: work item plus timer, torn down in
         * intel_modeset_cleanup(). */
        INIT_WORK(&dev_priv->idle_work, intel_idle_update);
        setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
                    (unsigned long)dev);
}
9260
/*
 * Modeset init steps that need GEM to be up: RC6 (which allocates GEM
 * context pages) and the overlay.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
        if (IS_IRONLAKE_M(dev))
                ironlake_enable_rc6(dev);

        intel_setup_overlay(dev);
}
9268
/*
 * Tear down the modeset state set up in intel_modeset_init() /
 * intel_modeset_gem_init().  Ordering matters: power features are
 * disabled under struct_mutex, the irq is removed before mode objects
 * so no new unpin/hotplug work can be queued, and per-CRTC idle
 * timers are stopped before the CRTCs are freed.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        struct intel_crtc *intel_crtc;

        drm_kms_helper_poll_fini(dev);
        mutex_lock(&dev->struct_mutex);

        intel_unregister_dsm_handler();


        /* Bring every active CRTC back to full pixel clock before
         * shutdown. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                /* Skip inactive CRTCs */
                if (!crtc->fb)
                        continue;

                intel_crtc = to_intel_crtc(crtc);
                intel_increase_pllclock(crtc);
        }

        intel_disable_fbc(dev);

        if (IS_IRONLAKE_M(dev))
                ironlake_disable_drps(dev);
        if (IS_GEN6(dev) || IS_GEN7(dev))
                gen6_disable_rps(dev);

        if (IS_IRONLAKE_M(dev))
                ironlake_disable_rc6(dev);

        mutex_unlock(&dev->struct_mutex);

        /* Disable the irq before mode object teardown, for the irq might
         * enqueue unpin/hotplug work. */
        drm_irq_uninstall(dev);
        cancel_work_sync(&dev_priv->hotplug_work);
        cancel_work_sync(&dev_priv->rps_work);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        /* Shut off idle work before the crtcs get freed. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                intel_crtc = to_intel_crtc(crtc);
                del_timer_sync(&intel_crtc->idle_timer);
        }
        del_timer_sync(&dev_priv->idle_timer);
        cancel_work_sync(&dev_priv->idle_work);

        drm_mode_config_cleanup(dev);
}
9321
9322 /*
9323  * Return which encoder is currently attached for connector.
9324  */
9325 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9326 {
9327         return &intel_attached_encoder(connector)->base;
9328 }
9329
/*
 * Record the encoder on the intel_connector and register the link
 * with the DRM core's connector/encoder mapping.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder)
{
        connector->encoder = encoder;
        drm_mode_connector_attach_encoder(&connector->base,
                                          &encoder->base);
}
9337
9338 /*
9339  * set vga decode state - true == enable VGA decode
9340  */
9341 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9342 {
9343         struct drm_i915_private *dev_priv = dev->dev_private;
9344         u16 gmch_ctrl;
9345
9346         pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
9347         if (state)
9348                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
9349         else
9350                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9351         pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
9352         return 0;
9353 }
9354
9355 #ifdef CONFIG_DEBUG_FS
9356 #include <linux/seq_file.h>
9357
/* Snapshot of display hardware state, captured at GPU error time for
 * the debugfs error-state dump.  Covers two pipes with their cursor
 * and primary plane registers. */
struct intel_display_error_state {
        struct intel_cursor_error_state {
                u32 control;
                u32 position;
                u32 base;
                /* NOTE(review): never filled in by
                 * intel_display_capture_error_state() nor printed —
                 * confirm whether it can be dropped. */
                u32 size;
        } cursor[2];

        struct intel_pipe_error_state {
                u32 conf;
                u32 source;

                /* Pipe timing registers. */
                u32 htotal;
                u32 hblank;
                u32 hsync;
                u32 vtotal;
                u32 vblank;
                u32 vsync;
        } pipe[2];

        struct intel_plane_error_state {
                u32 control;
                u32 stride;
                u32 size;
                u32 pos;
                u32 addr;
                /* Only captured on gen4+. */
                u32 surface;
                u32 tile_offset;
        } plane[2];
};
9388
/*
 * Capture the current display register state for both pipes into a
 * freshly allocated snapshot.  Returns NULL if the allocation fails.
 * Uses GFP_ATOMIC since this runs from the error-capture path.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
        int i;

        error = kmalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;

        for (i = 0; i < 2; i++) {
                error->cursor[i].control = I915_READ(CURCNTR(i));
                error->cursor[i].position = I915_READ(CURPOS(i));
                error->cursor[i].base = I915_READ(CURBASE(i));

                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
                error->plane[i].size = I915_READ(DSPSIZE(i));
                error->plane[i].pos = I915_READ(DSPPOS(i));
                error->plane[i].addr = I915_READ(DSPADDR(i));
                /* Surface base and tile offset only exist on gen4+. */
                if (INTEL_INFO(dev)->gen >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }

                error->pipe[i].conf = I915_READ(PIPECONF(i));
                error->pipe[i].source = I915_READ(PIPESRC(i));
                error->pipe[i].htotal = I915_READ(HTOTAL(i));
                error->pipe[i].hblank = I915_READ(HBLANK(i));
                error->pipe[i].hsync = I915_READ(HSYNC(i));
                error->pipe[i].vtotal = I915_READ(VTOTAL(i));
                error->pipe[i].vblank = I915_READ(VBLANK(i));
                error->pipe[i].vsync = I915_READ(VSYNC(i));
        }

        return error;
}
9427
/*
 * Dump a previously captured display error snapshot to a seq_file
 * (debugfs error state).  Output format/order must stay stable for
 * the error-state parsing tools.
 */
void
intel_display_print_error_state(struct seq_file *m,
                                struct drm_device *dev,
                                struct intel_display_error_state *error)
{
        int i;

        for (i = 0; i < 2; i++) {
                seq_printf(m, "Pipe [%d]:\n", i);
                seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
                seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
                seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
                seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
                seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
                seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
                seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
                seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

                seq_printf(m, "Plane [%d]:\n", i);
                seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
                seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
                seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
                seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
                seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
                /* Gen4+ only; matches what the capture side reads. */
                if (INTEL_INFO(dev)->gen >= 4) {
                        seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
                        seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
                }

                seq_printf(m, "Cursor [%d]:\n", i);
                seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
                seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
                seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }
}
9463 #endif