/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
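
/*
 * Editor's example (not part of the original source): a caller that wants
 * to allow normal and deep RC6 but not RC6pp would pass a mask of
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE).
 */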
static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:bxt */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
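
/*
 * Editor's note (not part of the original source): each entry below is
 * assumed to follow the struct cxsr_latency field order: is_desktop,
 * is_ddr3, fsb_freq, mem_freq, display_sr, display_hpll_disable,
 * cursor_sr, cursor_hpll_disable.
 */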
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
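
/*
 * Editor's example (not part of the original source): FW_WM(12, SR)
 * expands to ((12 << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the watermark
 * value is shifted into the SR field of a DSPFW register and clamped to
 * that field's width.
 */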
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
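
/*
 * Editor's example (not part of the original source): with lo_shift = 8
 * and hi_shift = 4, VLV_FIFO_START(dsparb, dsparb2, 8, 4) takes bits 15:8
 * of DSPARB as the low eight bits of the FIFO start offset and bit 4 of
 * DSPARB2 as the ninth bit, yielding a 9-bit start offset.
 */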
static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO, in cachelines
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
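
/*
 * Editor's worked example (hypothetical numbers, not from Bspec): with a
 * 148500 kHz pixel clock, 4 bytes per pixel, 5000 ns latency and 64-byte
 * cachelines, entries_required = (148500/1000) * 4 * 5000 / 1000 = 2960
 * bytes = DIV_ROUND_UP(2960, 64) = 47 cachelines; a 96-entry FIFO with a
 * guard size of 2 then yields wm_size = 96 - (47 + 2) = 47.
 */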
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}
enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
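
/*
 * Editor's worked example (hypothetical numbers): pixel_rate = 200000 kHz,
 * pipe_htotal = 2200, horiz_pixels = 1920, cpp = 4, latency = 30 (3 us):
 * ret = (30 * 200000) / (2200 * 10000) = 0, then (0 + 1) * 1920 * 4 =
 * 7680 bytes, and DIV_ROUND_UP(7680, 64) = 120 FIFO lines.
 */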
static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}
static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}
static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
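
/*
 * Editor's worked example (not part of the original source): with two
 * visible non-cursor planes at 4 bytes per pixel each, total_rate = 8 and
 * each plane gets 511 * 4 / 8 = 255 FIFO entries; the single leftover
 * entry is then handed out by the "spread the remainder" loop above.
 */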
static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
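
/*
 * Editor's example (not part of the original source): VLV_FIFO(SPRITEB,
 * sprite1_start) shifts the FIFO start offset into the SPRITEB field of
 * DSPARB and masks it to that field's width.
 */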
static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}
static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}
static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
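
/*
 * Editor's note (not part of the original source): a pipe mask with
 * exactly one bit set is a power of two, so is_power_of_2() is a compact
 * test for "exactly one pipe enabled"; single_plane_enabled(1 << PIPE_B)
 * is true, while single_plane_enabled((1 << PIPE_A) | (1 << PIPE_B)) is
 * false.
 */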
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
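
/*
 * Editor's worked example (hypothetical numbers): pixel_rate = 200000 kHz,
 * cpp = 4, latency = 50 (5 us): ret = 200000 * 4 * 50 = 40000000, and
 * DIV_ROUND_UP_ULL(40000000, 640000) + 2 = 63 + 2 = 65 cachelines.
 */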
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation. Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 struct intel_plane_state *pristate,
				 struct intel_plane_state *sprstate,
				 struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk == 0))
		return 0;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
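
/*
 * Editor's worked example (hypothetical numbers): crtc_htotal = 2200 at a
 * 148500 kHz clock gives linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8,
 * 148500) = 119, i.e. the line time expressed in 1/8 us units.
 */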
2046 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2048 struct drm_i915_private *dev_priv = dev->dev_private;
2053 int level, max_level = ilk_wm_max_level(dev);
2055 /* read the first set of memory latencies[0:3] */
2056 val = 0; /* data0 to be programmed to 0 for first set */
2057 mutex_lock(&dev_priv->rps.hw_lock);
2058 ret = sandybridge_pcode_read(dev_priv,
2059 GEN9_PCODE_READ_MEM_LATENCY,
2061 mutex_unlock(&dev_priv->rps.hw_lock);
2064 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2068 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2069 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2070 GEN9_MEM_LATENCY_LEVEL_MASK;
2071 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2072 GEN9_MEM_LATENCY_LEVEL_MASK;
2073 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2074 GEN9_MEM_LATENCY_LEVEL_MASK;
2076 /* read the second set of memory latencies[4:7] */
2077 val = 1; /* data0 to be programmed to 1 for second set */
2078 mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}
2088 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2089 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2090 GEN9_MEM_LATENCY_LEVEL_MASK;
2091 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2092 GEN9_MEM_LATENCY_LEVEL_MASK;
2093 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2094 GEN9_MEM_LATENCY_LEVEL_MASK;
2097 * WaWmMemoryReadLatency:skl
2099 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - WM0 is a bit special in that it's the only level that
2103 * can't be disabled if we want to have display working, so
2104 * we always add 2us there.
2105 * - For levels >=1, punit returns 0us latency when they are
2106 * disabled, so we respect that and don't add 2us then
2108 * Additionally, if a level n (n > 1) has a 0us latency, all
2109 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
2123 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2124 uint64_t sskpd = I915_READ64(MCH_SSKPD);
		wm[0] = (sskpd >> 56) & 0xFF;

		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
2129 wm[1] = (sskpd >> 4) & 0xFF;
2130 wm[2] = (sskpd >> 12) & 0xFF;
2131 wm[3] = (sskpd >> 20) & 0x1FF;
2132 wm[4] = (sskpd >> 32) & 0x1FF;
2133 } else if (INTEL_INFO(dev)->gen >= 6) {
2134 uint32_t sskpd = I915_READ(MCH_SSKPD);
2136 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2137 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2138 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2139 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2140 } else if (INTEL_INFO(dev)->gen >= 5) {
2141 uint32_t mltr = I915_READ(MLTR_ILK);
		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
2145 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2146 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2150 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;
2157 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
2168 int ilk_wm_max_level(const struct drm_device *dev)
2170 /* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
2181 static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
2185 int level, max_level = ilk_wm_max_level(dev);
2187 for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}
		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;
2205 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2206 name, level, wm[level],
2207 latency / 10, latency % 10);
2211 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2212 uint16_t wm[5], uint16_t min)
2214 int level, max_level = ilk_wm_max_level(dev_priv->dev);
	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
2220 for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
2226 static void snb_wm_latency_quirk(struct drm_device *dev)
2228 struct drm_i915_private *dev_priv = dev->dev_private;
2232 * The BIOS provided WM memory latency values are often
2233 * inadequate for high resolution displays. Adjust them.
2235 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2236 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2237 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
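	/*
	 * Example: min = 12 raises WM0 to at least 12 (0.1 us units, i.e.
	 * 1.2 us) and WM1+ to at least DIV_ROUND_UP(12, 5) = 3 (0.5 us
	 * units, i.e. 1.5 us).
	 */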
	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2243 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2244 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2245 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2248 static void ilk_setup_wm_latency(struct drm_device *dev)
2250 struct drm_i915_private *dev_priv = dev->dev_private;
2252 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2254 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2255 sizeof(dev_priv->wm.pri_latency));
2256 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2257 sizeof(dev_priv->wm.pri_latency));
2259 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2260 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2262 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2263 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2264 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
2270 static void skl_setup_wm_latency(struct drm_device *dev)
2272 struct drm_i915_private *dev_priv = dev->dev_private;
2274 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2275 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2278 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2279 struct intel_pipe_wm *pipe_wm)
2281 /* LP0 watermark maximums depend on this pipe alone */
2282 const struct intel_wm_config config = {
2283 .num_pipes_active = 1,
2284 .sprites_enabled = pipe_wm->sprites_enabled,
2285 .sprites_scaled = pipe_wm->sprites_scaled,
2287 struct ilk_wm_maximums max;
2289 /* LP0 watermarks always use 1/2 DDB partitioning */
2290 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2292 /* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
2301 /* Compute new watermarks for the pipe */
2302 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2304 struct drm_atomic_state *state = cstate->base.state;
2305 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2306 struct intel_pipe_wm *pipe_wm;
2307 struct drm_device *dev = state->dev;
2308 const struct drm_i915_private *dev_priv = dev->dev_private;
2309 struct intel_plane *intel_plane;
2310 struct intel_plane_state *pristate = NULL;
2311 struct intel_plane_state *sprstate = NULL;
2312 struct intel_plane_state *curstate = NULL;
2313 int level, max_level = ilk_wm_max_level(dev), usable_level;
2314 struct ilk_wm_maximums max;
2316 pipe_wm = &cstate->wm.ilk.optimal;
2318 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2319 struct intel_plane_state *ps;
		ps = intel_atomic_get_existing_plane_state(state,
							   &intel_plane->base);
		if (WARN_ON(!ps))
			continue;
		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}
2334 pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->visible;
		pipe_wm->sprites_scaled = sprstate->visible &&
			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
	}
2342 usable_level = max_level;
	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;
2352 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2353 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2355 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2356 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2358 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2359 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;
2364 ilk_compute_wm_reg_maximums(dev, 1, &max);
2366 for (level = 1; level <= max_level; level++) {
2367 struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
2369 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2370 pristate, sprstate, curstate, wm);
		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
2390 * Build a set of 'intermediate' watermark values that satisfy both the old
2391 * state and the new state. These can be programmed to the hardware
2394 static int ilk_compute_intermediate_wm(struct drm_device *dev,
2395 struct intel_crtc *intel_crtc,
2396 struct intel_crtc_state *newstate)
2398 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2399 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2400 int level, max_level = ilk_wm_max_level(dev);
2403 * Start with the final, target watermarks, then combine with the
2404 * currently active watermarks to get values that are safe both before
2405 * and after the vblank.
2407 *a = newstate->wm.ilk.optimal;
2408 a->pipe_enabled |= b->pipe_enabled;
2409 a->sprites_enabled |= b->sprites_enabled;
2410 a->sprites_scaled |= b->sprites_scaled;
2412 for (level = 0; level <= max_level; level++) {
2413 struct intel_wm_level *a_wm = &a->wm[level];
2414 const struct intel_wm_level *b_wm = &b->wm[level];
2416 a_wm->enable &= b_wm->enable;
2417 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2418 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2419 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2420 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
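		/*
		 * e.g. an old pri_val of 60 merged with a new pri_val of 40
		 * yields 60 here, so the value programmed before the vblank
		 * still covers the old plane configuration.
		 */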
2424 * We need to make sure that these merged watermark values are
2425 * actually a valid configuration themselves. If they're not,
2426 * there's no safe way to transition from the old state to
2427 * the new state, so we need to fail the atomic transaction.
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;
2433 * If our intermediate WM are identical to the final WM, then we can
2434 * omit the post-vblank programming; only update if it's different.
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
2443 * Merge the watermarks from all active pipes for a specific level.
2445 static void ilk_merge_wm_level(struct drm_device *dev,
2447 struct intel_wm_level *ret_wm)
2449 const struct intel_crtc *intel_crtc;
2451 ret_wm->enable = true;
2453 for_each_intel_crtc(dev, intel_crtc) {
2454 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
2455 const struct intel_wm_level *wm = &active->wm[level];
		if (!active->pipe_enabled)
			continue;
2461 * The watermark values may have been used in the past,
2462 * so we must maintain them in the registers for some
2463 * time even if the level is now disabled.
		if (!wm->enable)
			ret_wm->enable = false;
2468 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2469 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2470 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2471 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2476 * Merge all low power watermarks for all active pipes.
2478 static void ilk_wm_merge(struct drm_device *dev,
2479 const struct intel_wm_config *config,
2480 const struct ilk_wm_maximums *max,
2481 struct intel_pipe_wm *merged)
2483 struct drm_i915_private *dev_priv = dev->dev_private;
2484 int level, max_level = ilk_wm_max_level(dev);
2485 int last_enabled_level = max_level;
2487 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2488 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2489 config->num_pipes_active > 1)
2490 last_enabled_level = 0;
2492 /* ILK: FBC WM must be disabled always */
2493 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2495 /* merge each WM1+ level */
2496 for (level = 1; level <= max_level; level++) {
2497 struct intel_wm_level *wm = &merged->wm[level];
2499 ilk_merge_wm_level(dev, level, wm);
		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;
2508 * The spec says it is preferred to disable
2509 * FBC WMs instead of disabling a WM level.
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}
2518 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2520 * FIXME this is racy. FBC might get enabled later.
2521 * What we should check here is whether FBC can be
2522 * enabled sometime later.
2524 if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2525 intel_fbc_is_active(dev_priv)) {
2526 for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
2534 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2536 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
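	/*
	 * e.g. with wm[4] enabled the mapping is 1,3,4: LP2 reads level 3
	 * and LP3 reads level 4; otherwise LP1..LP3 map to levels 1..3.
	 */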
2537 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2540 /* The value we need to program into the WM_LPx latency field */
2541 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2543 struct drm_i915_private *dev_priv = dev->dev_private;
2545 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2548 return dev_priv->wm.pri_latency[level];
2551 static void ilk_compute_wm_results(struct drm_device *dev,
2552 const struct intel_pipe_wm *merged,
2553 enum intel_ddb_partitioning partitioning,
2554 struct ilk_wm_values *results)
2556 struct intel_crtc *intel_crtc;
2559 results->enable_fbc_wm = merged->fbc_wm_enabled;
2560 results->partitioning = partitioning;
2562 /* LP1+ register values */
2563 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2564 const struct intel_wm_level *r;
2566 level = ilk_wm_lp_to_level(wm_lp, merged);
2568 r = &merged->wm[level];
2571 * Maintain the watermark values even if the level is
2572 * disabled. Doing otherwise could cause underruns.
2574 results->wm_lp[wm_lp - 1] =
2575 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2582 if (INTEL_INFO(dev)->gen >= 8)
2583 results->wm_lp[wm_lp - 1] |=
2584 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;
2590 * Always set WM1S_LP_EN when spr_val != 0, even if the
2591 * level is disabled. Doing otherwise could cause underruns.
2593 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2594 WARN_ON(wm_lp != 1);
2595 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else {
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
		}
2600 /* LP0 register values */
2601 for_each_intel_crtc(dev, intel_crtc) {
2602 enum pipe pipe = intel_crtc->pipe;
2603 const struct intel_wm_level *r =
2604 &intel_crtc->wm.active.ilk.wm[0];
		if (WARN_ON(!r->enable))
			continue;
2609 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
2611 results->wm_pipe[pipe] =
2612 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2613 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2618 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2619 * case both are at the same level. Prefer r1 in case they're the same. */
2620 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2621 struct intel_pipe_wm *r1,
2622 struct intel_pipe_wm *r2)
2624 int level, max_level = ilk_wm_max_level(dev);
2625 int level1 = 0, level2 = 0;
2627 for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}
	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
2646 /* dirty bits used to track which watermarks need changes */
2647 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2648 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2649 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2650 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2651 #define WM_DIRTY_FBC (1 << 24)
2652 #define WM_DIRTY_DDB (1 << 25)
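/*
 * Resulting bit layout: per-pipe WM0 in bits 0-2, linetime in bits 8-10,
 * LP1-LP3 in bits 16-18, FBC in bit 24 and DDB in bit 25.
 */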
2654 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2655 const struct ilk_wm_values *old,
2656 const struct ilk_wm_values *new)
2658 unsigned int dirty = 0;
2662 for_each_pipe(dev_priv, pipe) {
2663 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2664 dirty |= WM_DIRTY_LINETIME(pipe);
2665 /* Must disable LP1+ watermarks too */
2666 dirty |= WM_DIRTY_LP_ALL;
2669 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2670 dirty |= WM_DIRTY_PIPE(pipe);
2671 /* Must disable LP1+ watermarks too */
2672 dirty |= WM_DIRTY_LP_ALL;
2676 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2677 dirty |= WM_DIRTY_FBC;
2678 /* Must disable LP1+ watermarks too */
2679 dirty |= WM_DIRTY_LP_ALL;
2682 if (old->partitioning != new->partitioning) {
2683 dirty |= WM_DIRTY_DDB;
2684 /* Must disable LP1+ watermarks too */
2685 dirty |= WM_DIRTY_LP_ALL;
2688 /* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;
2692 /* Find the lowest numbered LP1+ watermark in need of an update... */
2693 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}
2699 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2700 for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
2706 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2709 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2710 bool changed = false;
2712 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2713 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
2717 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2718 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
2722 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2723 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}
2729 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
 * The spec says we shouldn't write when we don't need to, because every
 * write causes WMs to be re-evaluated, expending some power.
2740 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2741 struct ilk_wm_values *results)
2743 struct drm_device *dev = dev_priv->dev;
2744 struct ilk_wm_values *previous = &dev_priv->wm.hw;
	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);
2754 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2755 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2756 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2757 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2758 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2759 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2761 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2762 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2763 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2764 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2765 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2766 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2768 if (dirty & WM_DIRTY_DDB) {
2769 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2770 val = I915_READ(WM_MISC);
2771 if (results->partitioning == INTEL_DDB_PART_1_2)
2772 val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
2775 I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
2778 if (results->partitioning == INTEL_DDB_PART_1_2)
2779 val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}
2786 if (dirty & WM_DIRTY_FBC) {
2787 val = I915_READ(DISP_ARB_CTL);
2788 if (results->enable_fbc_wm)
2789 val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
2792 I915_WRITE(DISP_ARB_CTL, val);
2795 if (dirty & WM_DIRTY_LP(1) &&
2796 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2797 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2799 if (INTEL_INFO(dev)->gen >= 7) {
2800 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2801 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2802 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2803 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2806 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2807 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2808 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2809 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2810 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2811 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2813 dev_priv->wm.hw = *results;
2816 bool ilk_disable_lp_wm(struct drm_device *dev)
2818 struct drm_i915_private *dev_priv = dev->dev_private;
2820 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2824 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2825 * different active planes.
2828 #define SKL_DDB_SIZE 896 /* in blocks */
2829 #define BXT_DDB_SIZE 512
2832 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
2833 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2834 * other universal planes are in indices 1..n. Note that this may leave unused
2835 * indices between the top "sprite" plane and the cursor.
static int
skl_wm_plane_id(const struct intel_plane *plane)
2840 switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
2843 case DRM_PLANE_TYPE_CURSOR:
2844 return PLANE_CURSOR;
2845 case DRM_PLANE_TYPE_OVERLAY:
2846 return plane->plane + 1;
2848 MISSING_CASE(plane->base.type);
2849 return plane->plane;
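/*
 * e.g. with two sprite planes enabled: primary -> 0, sprites -> 1 and 2,
 * cursor -> PLANE_CURSOR, matching the layout described above.
 */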
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2855 const struct intel_crtc_state *cstate,
2856 struct skl_ddb_entry *alloc, /* out */
2857 int *num_active /* out */)
2859 struct drm_atomic_state *state = cstate->base.state;
2860 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2861 struct drm_i915_private *dev_priv = to_i915(dev);
2862 struct drm_crtc *for_crtc = cstate->base.crtc;
2863 unsigned int pipe_size, ddb_size;
2864 int nth_active_pipe;
2865 int pipe = to_intel_crtc(for_crtc)->pipe;
2867 if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}
2874 if (intel_state->active_pipe_changes)
2875 *num_active = hweight32(intel_state->active_crtcs);
2877 *num_active = hweight32(dev_priv->active_crtcs);
2879 if (IS_BROXTON(dev))
2880 ddb_size = BXT_DDB_SIZE;
2882 ddb_size = SKL_DDB_SIZE;
2884 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2887 * If the state doesn't change the active CRTC's, then there's
2888 * no need to recalculate; the existing pipe allocation limits
2889 * should remain unchanged. Note that we're safe from racing
2890 * commits since any racing commit that changes the active CRTC
2891 * list would need to grab _all_ crtc locks, including the one
2892 * we currently hold.
2894 if (!intel_state->active_pipe_changes) {
		*alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
		return;
	}
2899 nth_active_pipe = hweight32(intel_state->active_crtcs &
2900 (drm_crtc_mask(for_crtc) - 1));
2901 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
2902 alloc->start = nth_active_pipe * ddb_size / *num_active;
2903 alloc->end = alloc->start + pipe_size;
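	/*
	 * e.g. SKL with two active pipes: ddb_size = 896 - 4 = 892 blocks,
	 * so each pipe gets 446 blocks and the second active pipe starts
	 * at block 446.
	 */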
2906 static unsigned int skl_cursor_allocation(int num_active)
	if (num_active == 1)
		return 32;

	return 8;
2914 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2916 entry->start = reg & 0x3ff;
2917 entry->end = (reg >> 16) & 0x3ff;
2922 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2923 struct skl_ddb_allocation *ddb /* out */)
2929 memset(ddb, 0, sizeof(*ddb));
2931 for_each_pipe(dev_priv, pipe) {
2932 enum intel_display_power_domain power_domain;
2934 power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
2938 for_each_plane(dev_priv, pipe, plane) {
2939 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2940 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2944 val = I915_READ(CUR_BUF_CFG(pipe));
2945 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2948 intel_display_power_put(dev_priv, power_domain);
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
2957 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2958 struct drm_framebuffer *fb = pstate->fb;
2959 uint32_t width = 0, height = 0;
2960 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
	if (!intel_pstate->visible)
		return 0;
	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;
2969 width = drm_rect_width(&intel_pstate->src) >> 16;
2970 height = drm_rect_height(&intel_pstate->src) >> 16;
2972 if (intel_rotation_90_or_270(pstate->rotation))
2973 swap(width, height);
2975 /* for planar format */
2976 if (format == DRM_FORMAT_NV12) {
2977 if (y) /* y-plane data rate */
2978 return width * height *
2979 drm_format_plane_cpp(format, 0);
2980 else /* uv-plane data rate */
2981 return (width / 2) * (height / 2) *
2982 drm_format_plane_cpp(format, 1);
2985 /* for packed formats */
2986 return width * height * drm_format_plane_cpp(format, 0);
2990 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * an 8192x4096@32bpp framebuffer:
2992 * 3 * 4096 * 8192 * 4 < 2^32
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
2997 struct drm_crtc_state *cstate = &intel_cstate->base;
2998 struct drm_atomic_state *state = cstate->state;
2999 struct drm_crtc *crtc = cstate->crtc;
3000 struct drm_device *dev = crtc->dev;
3001 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3002 const struct drm_plane *plane;
3003 const struct intel_plane *intel_plane;
3004 struct drm_plane_state *pstate;
3005 unsigned int rate, total_data_rate = 0;
	if (WARN_ON(!state))
		return 0;
3012 /* Calculate and cache data rate for each plane */
3013 for_each_plane_in_state(state, plane, pstate, i) {
3014 id = skl_wm_plane_id(to_intel_plane(plane));
3015 intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != intel_crtc->pipe)
			continue;
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
3023 intel_cstate->wm.skl.plane_data_rate[id] = rate;
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
3028 intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
3031 /* Calculate CRTC's total data rate from cached values */
3032 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3033 int id = skl_wm_plane_id(intel_plane);
3036 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
3037 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
3040 WARN_ON(cstate->plane_mask && total_data_rate == 0);
3042 return total_data_rate;
static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate,
		  int y)
3049 struct drm_framebuffer *fb = pstate->fb;
3050 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3051 uint32_t src_w, src_h;
3052 uint32_t min_scanlines = 8;
3058 /* For packed formats, no y-plane, return 0 */
	if (y && fb->pixel_format != DRM_FORMAT_NV12)
		return 0;
	/* For non-Y-tiled formats, return 8 blocks */
	if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
		return 8;
3067 src_w = drm_rect_width(&intel_pstate->src) >> 16;
3068 src_h = drm_rect_height(&intel_pstate->src) >> 16;
	if (intel_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);
	/* Halve UV plane width and height for NV12 */
	if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}
3079 if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3080 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3082 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3084 if (intel_rotation_90_or_270(pstate->rotation)) {
3085 switch (plane_bpp) {
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
		}
	}
3105 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
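/*
 * Worked example for skl_ddb_min_alloc() above: an unrotated 1920-wide
 * Y-tiled plane at 4 bytes per pixel keeps min_scanlines = 8, giving
 * DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3 = 60 * 2 + 3 = 123 blocks.
 */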
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3110 struct skl_ddb_allocation *ddb /* out */)
3112 struct drm_atomic_state *state = cstate->base.state;
3113 struct drm_crtc *crtc = cstate->base.crtc;
3114 struct drm_device *dev = crtc->dev;
3115 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3116 struct intel_plane *intel_plane;
3117 struct drm_plane *plane;
3118 struct drm_plane_state *pstate;
3119 enum pipe pipe = intel_crtc->pipe;
3120 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3121 uint16_t alloc_size, start, cursor_blocks;
3122 uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3123 uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
3124 unsigned int total_data_rate;
	if (WARN_ON(!state))
		return 0;
3131 if (!cstate->base.active) {
3132 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3133 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
		return 0;
	}
3138 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3139 alloc_size = skl_ddb_entry_size(alloc);
3140 if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}
3145 cursor_blocks = skl_cursor_allocation(num_active);
3146 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3147 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3149 alloc_size -= cursor_blocks;
	/* 1. Allocate the minimum required blocks for each active plane */
3152 for_each_plane_in_state(state, plane, pstate, i) {
3153 intel_plane = to_intel_plane(plane);
3154 id = skl_wm_plane_id(intel_plane);
		if (intel_plane->pipe != pipe)
			continue;
		if (!to_intel_plane_state(pstate)->visible) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}

		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}
3170 minimum[id] = skl_ddb_min_alloc(pstate, 0);
3171 y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
3174 for (i = 0; i < PLANE_CURSOR; i++) {
3175 alloc_size -= minimum[i];
3176 alloc_size -= y_minimum[i];
3180 * 2. Distribute the remaining space in proportion to the amount of
3181 * data each plane needs to fetch from memory.
3183 * FIXME: we may not allocate every single block here.
3185 total_data_rate = skl_get_total_relative_data_rate(cstate);
	if (total_data_rate == 0)
		return 0;
3189 start = alloc->start;
3190 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3191 unsigned int data_rate, y_data_rate;
3192 uint16_t plane_blocks, y_plane_blocks = 0;
3193 int id = skl_wm_plane_id(intel_plane);
3195 data_rate = cstate->wm.skl.plane_data_rate[id];
		/*
		 * Allocation for (packed formats) or (uv-plane part of
		 * planar format): promote the expression to 64 bits to
		 * avoid overflow; the result is < alloc_size since
		 * data_rate / total_data_rate < 1.
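		 *
		 * e.g. with alloc_size = 800, a plane fetching half of the
		 * pipe's total data rate receives minimum[id] + 400 blocks.
		 */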
3202 plane_blocks = minimum[id];
3203 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][id].start = start;
			ddb->plane[pipe][id].end = start + plane_blocks;
		}

		start += plane_blocks;
3215 * allocation for y_plane part of planar format:
3217 y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3219 y_plane_blocks = y_minimum[id];
3220 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
		if (y_data_rate) {
			ddb->y_plane[pipe][id].start = start;
			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
3234 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
3236 /* TODO: Take into account the scalers once we support them */
3237 return config->base.adjusted_mode.crtc_clock;
3241 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3242 * for the read latency) and cpp should always be <= 8, so that
3243 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3244 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3246 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
3248 uint32_t wm_intermediate_val, ret;
3253 wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
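/*
 * Worked example for skl_wm_method1() above: latency = 4 us,
 * pixel_rate = 148500 kHz, cpp = 4 gives 4 * 148500 * 4 / 512 = 4640,
 * and DIV_ROUND_UP(4640, 1000) = 5 blocks to cover the memory latency.
 */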
3259 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3260 uint32_t horiz_pixels, uint8_t cpp,
3261 uint64_t tiling, uint32_t latency)
3264 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3265 uint32_t wm_intermediate_val;
3270 plane_bytes_per_line = horiz_pixels * cpp;
3272 if (tiling == I915_FORMAT_MOD_Y_TILED ||
3273 tiling == I915_FORMAT_MOD_Yf_TILED) {
3274 plane_bytes_per_line *= 4;
3275 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3276 plane_blocks_per_line /= 4;
3278 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3281 wm_intermediate_val = latency * pixel_rate;
3282 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
	      plane_blocks_per_line;

	return ret;
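/*
 * Worked example for skl_wm_method2() above: latency = 4 us,
 * pixel_rate = 148500 kHz, htotal = 2200 gives
 * DIV_ROUND_UP(4 * 148500, 2200 * 1000) = 1 line elapsed, i.e. one
 * plane_blocks_per_line worth of blocks.
 */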
3288 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3289 struct intel_crtc_state *cstate,
3290 struct intel_plane_state *intel_pstate,
3291 uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
3294 uint8_t *out_lines, /* out */
3295 bool *enabled /* out */)
3297 struct drm_plane_state *pstate = &intel_pstate->base;
3298 struct drm_framebuffer *fb = pstate->fb;
3299 uint32_t latency = dev_priv->wm.skl_latency[level];
3300 uint32_t method1, method2;
3301 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3302 uint32_t res_blocks, res_lines;
3303 uint32_t selected_result;
3305 uint32_t width = 0, height = 0;
	if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
		*enabled = false;
		return 0;
	}
3312 width = drm_rect_width(&intel_pstate->src) >> 16;
3313 height = drm_rect_height(&intel_pstate->src) >> 16;
3315 if (intel_rotation_90_or_270(pstate->rotation))
3316 swap(width, height);
3318 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
				 cpp, latency);
	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 width, cpp, fb->modifier[0], latency);
3328 plane_bytes_per_line = width * cpp;
3329 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3331 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3332 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3333 uint32_t min_scanlines = 4;
3334 uint32_t y_tile_minimum;
3335 if (intel_rotation_90_or_270(pstate->rotation)) {
3336 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3337 drm_format_plane_cpp(fb->pixel_format, 1) :
3338 drm_format_plane_cpp(fb->pixel_format, 0);
3348 WARN(1, "Unsupported pixel depth for rotation");
3351 y_tile_minimum = plane_blocks_per_line * min_scanlines;
3352 selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}
3360 res_blocks = selected_result + 1;
3361 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3363 if (level >= 1 && level <= 7) {
		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}
3371 if (res_blocks >= ddb_allocation || res_lines > 31) {
3375 * If there are no valid level 0 watermarks, then we can't
3376 * support this display configuration.
3381 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3382 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3383 to_intel_crtc(cstate->base.crtc)->pipe,
3384 skl_wm_plane_id(to_intel_plane(pstate->plane)),
3385 res_blocks, ddb_allocation, res_lines);
	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3400 struct skl_ddb_allocation *ddb,
3401 struct intel_crtc_state *cstate,
		     int level,
		     struct skl_wm_level *result)
3405 struct drm_device *dev = dev_priv->dev;
3406 struct drm_atomic_state *state = cstate->base.state;
3407 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3408 struct drm_plane *plane;
3409 struct intel_plane *intel_plane;
3410 struct intel_plane_state *intel_pstate;
3411 uint16_t ddb_blocks;
3412 enum pipe pipe = intel_crtc->pipe;
3416 * We'll only calculate watermarks for planes that are actually
3417 * enabled, so make sure all other planes are set as disabled.
3419 memset(result, 0, sizeof(*result));
3421 for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
3422 int i = skl_wm_plane_id(intel_plane);
3424 plane = &intel_plane->base;
3425 intel_pstate = NULL;
		if (state)
			intel_pstate =
				intel_atomic_get_existing_plane_state(state,
								      plane);
3432 * Note: If we start supporting multiple pending atomic commits
3433 * against the same planes/CRTC's in the future, plane->state
3434 * will no longer be the correct pre-state to use for the
3435 * calculations here and we'll need to change where we get the
3436 * 'unchanged' plane data from.
3438 * For now this is fine because we only allow one queued commit
3439 * against a CRTC. Even if the plane isn't modified by this
3440 * transaction and we don't have a plane lock, we still have
3441 * the CRTC's lock, so we know that no other transactions are
3442 * racing with us to update it.
		if (!intel_pstate)
			intel_pstate = to_intel_plane_state(plane->state);
3447 WARN_ON(!intel_pstate->base.fb);
3449 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
		ret = skl_compute_plane_wm(dev_priv,
					   cstate,
					   intel_pstate,
					   ddb_blocks,
					   level,
					   &result->plane_res_b[i],
					   &result->plane_res_l[i],
					   &result->plane_en[i]);
		if (ret)
			return ret;
	}

	return 0;
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
	if (!cstate->base.active)
		return 0;

	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
		return 0;
3475 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3476 skl_pipe_pixel_rate(cstate));
3479 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3480 struct skl_wm_level *trans_wm /* out */)
3482 struct drm_crtc *crtc = cstate->base.crtc;
3483 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3484 struct intel_plane *intel_plane;
	if (!cstate->base.active)
		return;
3489 /* Until we know more, just disable transition WMs */
3490 for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3491 int i = skl_wm_plane_id(intel_plane);
3493 trans_wm->plane_en[i] = false;
3497 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3498 struct skl_ddb_allocation *ddb,
3499 struct skl_pipe_wm *pipe_wm)
3501 struct drm_device *dev = cstate->base.crtc->dev;
3502 const struct drm_i915_private *dev_priv = dev->dev_private;
3503 int level, max_level = ilk_wm_max_level(dev);
3506 for (level = 0; level <= max_level; level++) {
3507 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
					   level, &pipe_wm->wm[level]);
		if (ret)
			return ret;
	}
3512 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3514 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3519 static void skl_compute_wm_results(struct drm_device *dev,
3520 struct skl_pipe_wm *p_wm,
3521 struct skl_wm_values *r,
3522 struct intel_crtc *intel_crtc)
3524 int level, max_level = ilk_wm_max_level(dev);
3525 enum pipe pipe = intel_crtc->pipe;
3529 for (level = 0; level <= max_level; level++) {
3530 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
3534 PLANE_WM_LINES_SHIFT;
3535 temp |= p_wm->wm[level].plane_res_b[i];
3536 if (p_wm->wm[level].plane_en[i])
3537 temp |= PLANE_WM_EN;
3539 r->plane[pipe][i][level] = temp;
		temp = 0;
		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3545 temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
3547 if (p_wm->wm[level].plane_en[PLANE_CURSOR])
3548 temp |= PLANE_WM_EN;
3550 r->plane[pipe][PLANE_CURSOR][level] = temp;
3554 /* transition WMs */
3555 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3558 temp |= p_wm->trans_wm.plane_res_b[i];
3559 if (p_wm->trans_wm.plane_en[i])
3560 temp |= PLANE_WM_EN;
3562 r->plane_trans[pipe][i] = temp;
	temp = 0;
	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3567 temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3568 if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
3569 temp |= PLANE_WM_EN;
3571 r->plane_trans[pipe][PLANE_CURSOR] = temp;
3573 r->wm_linetime[pipe] = p_wm->linetime;
3576 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3578 const struct skl_ddb_entry *entry)
3581 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3586 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3587 const struct skl_wm_values *new)
3589 struct drm_device *dev = dev_priv->dev;
3590 struct intel_crtc *crtc;
3592 for_each_intel_crtc(dev, crtc) {
3593 int i, level, max_level = ilk_wm_max_level(dev);
3594 enum pipe pipe = crtc->pipe;
		if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
			continue;
3601 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3603 for (level = 0; level <= max_level; level++) {
3604 for (i = 0; i < intel_num_planes(crtc); i++)
3605 I915_WRITE(PLANE_WM(pipe, i, level),
3606 new->plane[pipe][i][level]);
3607 I915_WRITE(CUR_WM(pipe, level),
3608 new->plane[pipe][PLANE_CURSOR][level]);
3610 for (i = 0; i < intel_num_planes(crtc); i++)
3611 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3612 new->plane_trans[pipe][i]);
3613 I915_WRITE(CUR_WM_TRANS(pipe),
3614 new->plane_trans[pipe][PLANE_CURSOR]);
3616 for (i = 0; i < intel_num_planes(crtc); i++) {
3617 skl_ddb_entry_write(dev_priv,
3618 PLANE_BUF_CFG(pipe, i),
3619 &new->ddb.plane[pipe][i]);
3620 skl_ddb_entry_write(dev_priv,
3621 PLANE_NV12_BUF_CFG(pipe, i),
3622 &new->ddb.y_plane[pipe][i]);
3625 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3626 &new->ddb.plane[pipe][PLANE_CURSOR]);
3631 * When setting up a new DDB allocation arrangement, we need to correctly
3632 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
3636 * Roughly the sequence looks like:
3637 * 1. re-allocate the pipe(s) with the allocation being reduced and not
3638 * overlapping with a previous light-up pipe (another way to put it is:
 * pipes with their new allocation strictly included in their old ones).
3640 * 2. re-allocate the other pipes that get their allocation reduced
3641 * 3. allocate the pipes having their allocation increased
3643 * Steps 1. and 2. are here to take care of the following case:
3644 * - Initially DDB looks like this:
3647 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3651 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
3659 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3661 for_each_plane(dev_priv, pipe, plane) {
3662 I915_WRITE(PLANE_SURF(pipe, plane),
3663 I915_READ(PLANE_SURF(pipe, plane)));
3665 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
3673 uint16_t old_size, new_size;
3675 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3676 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3678 return old_size != new_size &&
3679 new->pipe[pipe].start >= old->pipe[pipe].start &&
3680 new->pipe[pipe].end <= old->pipe[pipe].end;
3683 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3684 struct skl_wm_values *new_values)
3686 struct drm_device *dev = dev_priv->dev;
3687 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3688 bool reallocated[I915_MAX_PIPES] = {};
3689 struct intel_crtc *crtc;
3692 new_ddb = &new_values->ddb;
3693 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3696 * First pass: flush the pipes with the new allocation contained into
3699 * We'll wait for the vblank on those pipes to ensure we can safely
3700 * re-allocate the freed space without this pipe fetching from it.
3702 for_each_intel_crtc(dev, crtc) {
		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;
3711 skl_wm_flush_pipe(dev_priv, pipe, 1);
3712 intel_wait_for_vblank(dev, pipe);
3714 reallocated[pipe] = true;
3719 * Second pass: flush the pipes that are having their allocation
3720 * reduced, but overlapping with a previous allocation.
3722 * Here as well we need to wait for the vblank to make sure the freed
3723 * space is not used anymore.
3725 for_each_intel_crtc(dev, crtc) {
		if (reallocated[pipe])
			continue;
3734 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3735 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3736 skl_wm_flush_pipe(dev_priv, pipe, 2);
3737 intel_wait_for_vblank(dev, pipe);
3738 reallocated[pipe] = true;
3743 * Third pass: flush the pipes that got more space allocated.
3745 * We don't need to actively wait for the update here, next vblank
3746 * will just get more DDB space with the correct WM values.
3748 for_each_intel_crtc(dev, crtc) {
		 * At this point, only the pipes that got more space than
		 * before are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;
3761 skl_wm_flush_pipe(dev_priv, pipe, 3);
3765 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3766 struct skl_ddb_allocation *ddb, /* out */
3767 struct skl_pipe_wm *pipe_wm, /* out */
3768 bool *changed /* out */)
3770 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
3771 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
static int
skl_compute_ddb(struct drm_atomic_state *state)
3789 struct drm_device *dev = state->dev;
3790 struct drm_i915_private *dev_priv = to_i915(dev);
3791 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3792 struct intel_crtc *intel_crtc;
3793 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
3794 unsigned realloc_pipes = dev_priv->active_crtcs;
3798 * If this is our first atomic update following hardware readout,
3799 * we can't trust the DDB that the BIOS programmed for us. Let's
3800 * pretend that all pipes switched active status so that we'll
3801 * ensure a full DDB recompute.
3803 if (dev_priv->wm.distrust_bios_wm)
3804 intel_state->active_pipe_changes = ~0;
3807 * If the modeset changes which CRTC's are active, we need to
3808 * recompute the DDB allocation for *all* active pipes, even
3809 * those that weren't otherwise being modified in any way by this
3810 * atomic commit. Due to the shrinking of the per-pipe allocations
3811 * when new active CRTC's are added, it's possible for a pipe that
3812 * we were already using and aren't changing at all here to suddenly
 * become invalid if its DDB needs exceed its new allocation.
3815 * Note that if we wind up doing a full DDB recompute, we can't let
3816 * any other display updates race with this transaction, so we need
3817 * to grab the lock on *all* CRTC's.
3819 if (intel_state->active_pipe_changes) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}
3824 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
3825 struct intel_crtc_state *cstate;
3827 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);
		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;
	}

	return 0;
static int
skl_compute_wm(struct drm_atomic_state *state)
3842 struct drm_crtc *crtc;
3843 struct drm_crtc_state *cstate;
3844 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3845 struct skl_wm_values *results = &intel_state->wm_results;
3846 struct skl_pipe_wm *pipe_wm;
3847 bool changed = false;
3851 * If this transaction isn't actually touching any CRTC's, don't
3852 * bother with watermark calculation. Note that if we pass this
3853 * test, we're guaranteed to hold at least one CRTC state mutex,
3854 * which means we can safely use values like dev_priv->active_crtcs
3855 * since any racing commits that want to update them would need to
3856 * hold _all_ CRTC state mutexes.
	for_each_crtc_in_state(state, crtc, cstate, i)
		changed = true;
	if (!changed)
		return 0;
3863 /* Clear all dirty flags */
3864 results->dirty_pipes = 0;
	ret = skl_compute_ddb(state);
	if (ret)
		return ret;
3871 * Calculate WM's for all pipes that are part of this transaction.
3872 * Note that the DDB allocation above may have added more CRTC's that
3873 * weren't otherwise being modified (and set bits in dirty_pipes) if
3874 * pipe allocations had to change.
3876 * FIXME: Now that we're doing this in the atomic check phase, we
3877 * should allow skl_update_pipe_wm() to return failure in cases where
3878 * no suitable watermark values can be found.
3880 for_each_crtc_in_state(state, crtc, cstate, i) {
3881 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3882 struct intel_crtc_state *intel_cstate =
3883 to_intel_crtc_state(cstate);
3885 pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
					 &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);
		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;
3898 intel_cstate->update_wm_pre = true;
		skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
	}

	return 0;
3905 static void skl_update_wm(struct drm_crtc *crtc)
3907 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3908 struct drm_device *dev = crtc->dev;
3909 struct drm_i915_private *dev_priv = dev->dev_private;
3910 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3911 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3912 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
		return;
3917 intel_crtc->wm.active.skl = *pipe_wm;
3919 mutex_lock(&dev_priv->wm.wm_mutex);
3921 skl_write_wm_values(dev_priv, results);
3922 skl_flush_wm_values(dev_priv, results);
3924 /* store the new configuration */
3925 dev_priv->wm.skl_hw = *results;
3927 mutex_unlock(&dev_priv->wm.wm_mutex);
3930 static void ilk_compute_wm_config(struct drm_device *dev,
3931 struct intel_wm_config *config)
3933 struct intel_crtc *crtc;
3935 /* Compute the currently _active_ config */
3936 for_each_intel_crtc(dev, crtc) {
3937 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
		if (!wm->pipe_enabled)
			continue;
3942 config->sprites_enabled |= wm->sprites_enabled;
3943 config->sprites_scaled |= wm->sprites_scaled;
3944 config->num_pipes_active++;
3948 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
3950 struct drm_device *dev = dev_priv->dev;
3951 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3952 struct ilk_wm_maximums max;
3953 struct intel_wm_config config = {};
3954 struct ilk_wm_values results = {};
3955 enum intel_ddb_partitioning partitioning;
3957 ilk_compute_wm_config(dev, &config);
3959 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3960 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3962 /* 5/6 split only in single pipe config on IVB+ */
3963 if (INTEL_INFO(dev)->gen >= 7 &&
3964 config.num_pipes_active == 1 && config.sprites_enabled) {
3965 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3966 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
3968 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3970 best_lp_wm = &lp_wm_1_2;
3973 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3974 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3976 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3978 ilk_write_wm_values(dev_priv, &results);
3981 static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
3983 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
3984 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3986 mutex_lock(&dev_priv->wm.wm_mutex);
3987 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
3988 ilk_program_watermarks(dev_priv);
3989 mutex_unlock(&dev_priv->wm.wm_mutex);
3992 static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
3994 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
3995 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3997 mutex_lock(&dev_priv->wm.wm_mutex);
3998 if (cstate->wm.need_postvbl_update) {
3999 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
4000 ilk_program_watermarks(dev_priv);
4002 mutex_unlock(&dev_priv->wm.wm_mutex);
4005 static void skl_pipe_wm_active_state(uint32_t val,
					     struct skl_pipe_wm *active,
					     bool is_transwm,
					     bool is_cursor,
					     int i,
					     int level)
4012 bool is_enabled = (val & PLANE_WM_EN) != 0;
4016 active->wm[level].plane_en[i] = is_enabled;
4017 active->wm[level].plane_res_b[i] =
4018 val & PLANE_WM_BLOCKS_MASK;
4019 active->wm[level].plane_res_l[i] =
4020 (val >> PLANE_WM_LINES_SHIFT) &
4021 PLANE_WM_LINES_MASK;
4023 active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
4024 active->wm[level].plane_res_b[PLANE_CURSOR] =
4025 val & PLANE_WM_BLOCKS_MASK;
4026 active->wm[level].plane_res_l[PLANE_CURSOR] =
4027 (val >> PLANE_WM_LINES_SHIFT) &
4028 PLANE_WM_LINES_MASK;
4032 active->trans_wm.plane_en[i] = is_enabled;
4033 active->trans_wm.plane_res_b[i] =
4034 val & PLANE_WM_BLOCKS_MASK;
4035 active->trans_wm.plane_res_l[i] =
4036 (val >> PLANE_WM_LINES_SHIFT) &
4037 PLANE_WM_LINES_MASK;
4039 active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
4040 active->trans_wm.plane_res_b[PLANE_CURSOR] =
4041 val & PLANE_WM_BLOCKS_MASK;
4042 active->trans_wm.plane_res_l[PLANE_CURSOR] =
4043 (val >> PLANE_WM_LINES_SHIFT) &
4044 PLANE_WM_LINES_MASK;
4049 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4051 struct drm_device *dev = crtc->dev;
4052 struct drm_i915_private *dev_priv = dev->dev_private;
4053 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
4054 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4055 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4056 struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
4057 enum pipe pipe = intel_crtc->pipe;
4058 int level, i, max_level;
4061 max_level = ilk_wm_max_level(dev);
4063 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4065 for (level = 0; level <= max_level; level++) {
4066 for (i = 0; i < intel_num_planes(intel_crtc); i++)
4067 hw->plane[pipe][i][level] =
4068 I915_READ(PLANE_WM(pipe, i, level));
4069 hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
4072 for (i = 0; i < intel_num_planes(intel_crtc); i++)
4073 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
4074 hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
	if (!intel_crtc->active)
		return;
4079 hw->dirty_pipes |= drm_crtc_mask(crtc);
4081 active->linetime = hw->wm_linetime[pipe];
4083 for (level = 0; level <= max_level; level++) {
4084 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4085 temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						 false, i, level);
4089 temp = hw->plane[pipe][PLANE_CURSOR][level];
4090 skl_pipe_wm_active_state(temp, active, false, true, i, level);
4093 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4094 temp = hw->plane_trans[pipe][i];
4095 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
4098 temp = hw->plane_trans[pipe][PLANE_CURSOR];
4099 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
4101 intel_crtc->wm.active.skl = *active;
4104 void skl_wm_get_hw_state(struct drm_device *dev)
4106 struct drm_i915_private *dev_priv = dev->dev_private;
4107 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
4108 struct drm_crtc *crtc;
4110 skl_ddb_get_hw_state(dev_priv, ddb);
4111 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
4112 skl_pipe_wm_get_hw_state(crtc);
4114 if (dev_priv->active_crtcs) {
4115 /* Fully recompute DDB on first atomic commit */
4116 dev_priv->wm.distrust_bios_wm = true;
	} else {
		/* Easy/common case; just sanitize DDB now if everything off */
		memset(ddb, 0, sizeof(*ddb));
	}
4123 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4125 struct drm_device *dev = crtc->dev;
4126 struct drm_i915_private *dev_priv = dev->dev_private;
4127 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4128 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4129 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4130 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
4131 enum pipe pipe = intel_crtc->pipe;
4132 static const i915_reg_t wm0_pipe_reg[] = {
4133 [PIPE_A] = WM0_PIPEA_ILK,
4134 [PIPE_B] = WM0_PIPEB_ILK,
4135 [PIPE_C] = WM0_PIPEC_IVB,
4138 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
4139 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4140 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4142 memset(active, 0, sizeof(*active));
4144 active->pipe_enabled = intel_crtc->active;
4146 if (active->pipe_enabled) {
4147 u32 tmp = hw->wm_pipe[pipe];
4150 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
4152 * we can't really reverse compute them in case
4153 * multiple pipes are active.
4155 active->wm[0].enable = true;
4156 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4157 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4158 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4159 active->linetime = hw->wm_linetime[pipe];
4161 int level, max_level = ilk_wm_max_level(dev);
4164 * For inactive pipes, all watermark levels
4165 * should be marked as enabled but zeroed,
4166 * which is what we'd compute them to.
4168 for (level = 0; level <= max_level; level++)
4169 active->wm[level].enable = true;
4172 intel_crtc->wm.active.ilk = *active;
4175 #define _FW_WM(value, plane) \
4176 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4177 #define _FW_WM_VLV(value, plane) \
4178 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
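/* For reference: _FW_WM(tmp, SR), for example, expands to
 * ((tmp) & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT, i.e. it pulls one plane's
 * watermark field out of a packed DSPFW register value. */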
4180 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4181 struct vlv_wm_values *wm)
4183 enum pipe pipe;
4184 uint32_t tmp;
4186 for_each_pipe(dev_priv, pipe) {
4187 tmp = I915_READ(VLV_DDL(pipe));
4189 wm->ddl[pipe].primary =
4190 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4191 wm->ddl[pipe].cursor =
4192 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4193 wm->ddl[pipe].sprite[0] =
4194 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4195 wm->ddl[pipe].sprite[1] =
4196 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4199 tmp = I915_READ(DSPFW1);
4200 wm->sr.plane = _FW_WM(tmp, SR);
4201 wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
4202 wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
4203 wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
4205 tmp = I915_READ(DSPFW2);
4206 wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
4207 wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
4208 wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
4210 tmp = I915_READ(DSPFW3);
4211 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4213 if (IS_CHERRYVIEW(dev_priv)) {
4214 tmp = I915_READ(DSPFW7_CHV);
4215 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4216 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4218 tmp = I915_READ(DSPFW8_CHV);
4219 wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
4220 wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
4222 tmp = I915_READ(DSPFW9_CHV);
4223 wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
4224 wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
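/* Note: DSPHOWM carries the high bit of each watermark value; the reads
 * below OR it in above the low bits already parsed from the DSPFW
 * registers (shifted by 9 for the SR plane value, 8 for the rest). */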
4226 tmp = I915_READ(DSPHOWM);
4227 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4228 wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4229 wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4230 wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
4231 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4232 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4233 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4234 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4235 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4236 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4237 } else {
4238 tmp = I915_READ(DSPFW7);
4239 wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4240 wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4242 tmp = I915_READ(DSPHOWM);
4243 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4244 wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4245 wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4246 wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4247 wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4248 wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4249 wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4250 }
4253 #undef _FW_WM
4254 #undef _FW_WM_VLV
4256 void vlv_wm_get_hw_state(struct drm_device *dev)
4258 struct drm_i915_private *dev_priv = to_i915(dev);
4259 struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4260 struct intel_plane *plane;
4261 enum pipe pipe;
4262 u32 val;
4264 vlv_read_wm_values(dev_priv, wm);
4266 for_each_intel_plane(dev, plane) {
4267 switch (plane->base.type) {
4268 int sprite;
4269 case DRM_PLANE_TYPE_CURSOR:
4270 plane->wm.fifo_size = 63;
4271 break;
4272 case DRM_PLANE_TYPE_PRIMARY:
4273 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
4274 break;
4275 case DRM_PLANE_TYPE_OVERLAY:
4276 sprite = plane->plane;
4277 plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
4278 break;
4279 }
4280 }
4282 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4283 wm->level = VLV_WM_LEVEL_PM2;
4285 if (IS_CHERRYVIEW(dev_priv)) {
4286 mutex_lock(&dev_priv->rps.hw_lock);
4288 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4289 if (val & DSP_MAXFIFO_PM5_ENABLE)
4290 wm->level = VLV_WM_LEVEL_PM5;
4292 /*
4293 * If DDR DVFS is disabled in the BIOS, Punit
4294 * will never ack the request. So if that happens
4295 * assume we don't have to enable/disable DDR DVFS
4296 * dynamically. To test that just set the REQ_ACK
4297 * bit to poke the Punit, but don't change the
4298 * HIGH/LOW bits so that we don't actually change
4299 * the current state.
4300 */
4301 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4302 val |= FORCE_DDR_FREQ_REQ_ACK;
4303 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4305 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4306 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4307 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4308 "assuming DDR DVFS is disabled\n");
4309 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4310 } else {
4311 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4312 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4313 wm->level = VLV_WM_LEVEL_DDR_DVFS;
4314 }
4316 mutex_unlock(&dev_priv->rps.hw_lock);
4317 }
4319 for_each_pipe(dev_priv, pipe)
4320 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4321 pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4322 wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4324 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4325 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4328 void ilk_wm_get_hw_state(struct drm_device *dev)
4330 struct drm_i915_private *dev_priv = dev->dev_private;
4331 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4332 struct drm_crtc *crtc;
4334 for_each_crtc(dev, crtc)
4335 ilk_pipe_wm_get_hw_state(crtc);
4337 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4338 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4339 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4341 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4342 if (INTEL_INFO(dev)->gen >= 7) {
4343 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4344 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4345 }
4347 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4348 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4349 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4350 else if (IS_IVYBRIDGE(dev))
4351 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4352 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4354 hw->enable_fbc_wm =
4355 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4358 /**
4359 * intel_update_watermarks - update FIFO watermark values based on current modes
4361 * Calculate watermark values for the various WM regs based on current mode
4362 * and plane configuration.
4364 * There are several cases to deal with here:
4365 * - normal (i.e. non-self-refresh)
4366 * - self-refresh (SR) mode
4367 * - lines are large relative to FIFO size (buffer can hold up to 2)
4368 * - lines are small relative to FIFO size (buffer can hold more than 2
4369 * lines), so need to account for TLB latency
4371 * The normal calculation is:
4372 * watermark = dotclock * bytes per pixel * latency
4373 * where latency is platform & configuration dependent (we assume pessimal
4374 * values by default).
4376 * The SR calculation is:
4377 * watermark = (trunc(latency/line time)+1) * surface width *
4378 * bytes per pixel
4379 * where
4380 * line time = htotal / dotclock
4381 * surface width = hdisplay for normal plane and 64 for cursor
4382 * and latency is assumed to be high, as above.
4384 * The final value programmed to the register should always be rounded up,
4385 * and include an extra 2 entries to account for clock crossings.
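* A worked example (illustrative numbers only): with a 100 MHz dotclock,
* 4 bytes per pixel and an assumed 12 us latency, the normal watermark is
* 12 us * 100 MHz * 4 B = 4800 bytes; with 64-byte FIFO entries that is
* 75 entries, plus the 2 extra entries above, rounded up to 77.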
4387 * We don't use the sprite, so we can ignore that. And on Crestline we have
4388 * to set the non-SR watermarks to 8.
4389 */
4390 void intel_update_watermarks(struct drm_crtc *crtc)
4392 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4394 if (dev_priv->display.update_wm)
4395 dev_priv->display.update_wm(crtc);
4398 /**
4399 * Lock protecting IPS related data structures
4400 */
4401 DEFINE_SPINLOCK(mchdev_lock);
4403 /* Global for IPS driver to get at the current i915 device. Protected by
4404 * mchdev_lock. */
4405 static struct drm_i915_private *i915_mch_dev;
4407 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4409 u16 rgvswctl;
4411 assert_spin_locked(&mchdev_lock);
4413 rgvswctl = I915_READ16(MEMSWCTL);
4414 if (rgvswctl & MEMCTL_CMD_STS) {
4415 DRM_DEBUG("gpu busy, RCS change rejected\n");
4416 return false; /* still busy with another command */
4417 }
4419 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4420 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4421 I915_WRITE16(MEMSWCTL, rgvswctl);
4422 POSTING_READ16(MEMSWCTL);
4424 rgvswctl |= MEMCTL_CMD_STS;
4425 I915_WRITE16(MEMSWCTL, rgvswctl);
4427 return true;
4430 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4432 u32 rgvmodectl;
4433 u8 fmax, fmin, fstart, vstart;
4435 spin_lock_irq(&mchdev_lock);
4437 rgvmodectl = I915_READ(MEMMODECTL);
4439 /* Enable temp reporting */
4440 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4441 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4443 /* 100ms RC evaluation intervals */
4444 I915_WRITE(RCUPEI, 100000);
4445 I915_WRITE(RCDNEI, 100000);
4447 /* Set max/min thresholds to 90ms and 80ms respectively */
4448 I915_WRITE(RCBMAXAVG, 90000);
4449 I915_WRITE(RCBMINAVG, 80000);
4451 I915_WRITE(MEMIHYST, 1);
4453 /* Set up min, max, and cur for interrupt handling */
4454 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4455 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4456 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4457 MEMMODE_FSTART_SHIFT;
4459 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
4460 PXVFREQ_PX_SHIFT;
4462 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4463 dev_priv->ips.fstart = fstart;
4465 dev_priv->ips.max_delay = fstart;
4466 dev_priv->ips.min_delay = fmin;
4467 dev_priv->ips.cur_delay = fstart;
4469 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4470 fmax, fmin, fstart);
4472 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4474 /*
4475 * Interrupts will be enabled in ironlake_irq_postinstall
4476 */
4478 I915_WRITE(VIDSTART, vstart);
4479 POSTING_READ(VIDSTART);
4481 rgvmodectl |= MEMMODE_SWMODE_EN;
4482 I915_WRITE(MEMMODECTL, rgvmodectl);
4484 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4485 DRM_ERROR("stuck trying to change perf mode\n");
4486 mdelay(1);
4488 ironlake_set_drps(dev_priv, fstart);
4490 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4491 I915_READ(DDREC) + I915_READ(CSIEC);
4492 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4493 dev_priv->ips.last_count2 = I915_READ(GFXEC);
4494 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4496 spin_unlock_irq(&mchdev_lock);
4499 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4501 u16 rgvswctl;
4503 spin_lock_irq(&mchdev_lock);
4505 rgvswctl = I915_READ16(MEMSWCTL);
4507 /* Ack interrupts, disable EFC interrupt */
4508 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4509 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4510 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4511 I915_WRITE(DEIIR, DE_PCU_EVENT);
4512 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4514 /* Go back to the starting frequency */
4515 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4516 mdelay(1);
4517 rgvswctl |= MEMCTL_CMD_STS;
4518 I915_WRITE(MEMSWCTL, rgvswctl);
4519 mdelay(1);
4521 spin_unlock_irq(&mchdev_lock);
4524 /* There's a funny hw issue where the hw returns all 0 when reading from
4525 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4526 * ourselves, instead of doing a rmw cycle (which might result in us clearing
4527 * all limits and the gpu stuck at whatever frequency it is at atm).
4528 */
4529 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4531 u32 limits;
4533 /* Only set the down limit when we've reached the lowest level to avoid
4534 * getting more interrupts, otherwise leave this clear. This prevents a
4535 * race in the hw when coming out of rc6: There's a tiny window where
4536 * the hw runs at the minimal clock before selecting the desired
4537 * frequency, if the down threshold expires in that window we will not
4538 * receive a down interrupt. */
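/* Note the differing field layout below: on gen9 the max/min limits are
 * placed at bits 23 and 14 of GEN6_RP_INTERRUPT_LIMITS, while earlier
 * gens use bits 24 and 16. */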
4539 if (IS_GEN9(dev_priv)) {
4540 limits = (dev_priv->rps.max_freq_softlimit) << 23;
4541 if (val <= dev_priv->rps.min_freq_softlimit)
4542 limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4543 } else {
4544 limits = dev_priv->rps.max_freq_softlimit << 24;
4545 if (val <= dev_priv->rps.min_freq_softlimit)
4546 limits |= dev_priv->rps.min_freq_softlimit << 16;
4547 }
4549 return limits;
4552 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4554 int new_power;
4555 u32 threshold_up = 0, threshold_down = 0; /* in % */
4556 u32 ei_up = 0, ei_down = 0;
4558 new_power = dev_priv->rps.power;
4559 switch (dev_priv->rps.power) {
4560 case LOW_POWER:
4561 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
4562 new_power = BETWEEN;
4563 break;
4565 case BETWEEN:
4566 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
4567 new_power = LOW_POWER;
4568 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
4569 new_power = HIGH_POWER;
4570 break;
4572 case HIGH_POWER:
4573 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
4574 new_power = BETWEEN;
4575 break;
4576 }
4577 /* Max/min bins are special */
4578 if (val <= dev_priv->rps.min_freq_softlimit)
4579 new_power = LOW_POWER;
4580 if (val >= dev_priv->rps.max_freq_softlimit)
4581 new_power = HIGH_POWER;
4582 if (new_power == dev_priv->rps.power)
4583 return;
4585 /* Note the units here are not exactly 1us, but 1280ns. */
4586 switch (new_power) {
4587 case LOW_POWER:
4588 /* Upclock if more than 95% busy over 16ms */
4589 ei_up = 16000;
4590 threshold_up = 95;
4592 /* Downclock if less than 85% busy over 32ms */
4593 ei_down = 32000;
4594 threshold_down = 85;
4595 break;
4597 case BETWEEN:
4598 /* Upclock if more than 90% busy over 13ms */
4599 ei_up = 13000;
4600 threshold_up = 90;
4602 /* Downclock if less than 75% busy over 32ms */
4603 ei_down = 32000;
4604 threshold_down = 75;
4605 break;
4607 case HIGH_POWER:
4608 /* Upclock if more than 85% busy over 10ms */
4609 ei_up = 10000;
4610 threshold_up = 85;
4612 /* Downclock if less than 60% busy over 32ms */
4613 ei_down = 32000;
4614 threshold_down = 60;
4615 break;
4616 }
4618 I915_WRITE(GEN6_RP_UP_EI,
4619 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4620 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4621 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
4623 I915_WRITE(GEN6_RP_DOWN_EI,
4624 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4625 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4626 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
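/* Worked example: in LOW_POWER the up interval is 16000 us with a 95%
 * threshold, so GEN6_RP_UP_THRESHOLD is programmed with 16000 * 95 / 100
 * = 15200 us, converted to hardware units by GT_INTERVAL_FROM_US(). */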
4628 I915_WRITE(GEN6_RP_CONTROL,
4629 GEN6_RP_MEDIA_TURBO |
4630 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4631 GEN6_RP_MEDIA_IS_GFX |
4632 GEN6_RP_ENABLE |
4633 GEN6_RP_UP_BUSY_AVG |
4634 GEN6_RP_DOWN_IDLE_AVG);
4636 dev_priv->rps.power = new_power;
4637 dev_priv->rps.up_threshold = threshold_up;
4638 dev_priv->rps.down_threshold = threshold_down;
4639 dev_priv->rps.last_adj = 0;
4642 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4644 u32 mask = 0;
4646 if (val > dev_priv->rps.min_freq_softlimit)
4647 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4648 if (val < dev_priv->rps.max_freq_softlimit)
4649 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4651 mask &= dev_priv->pm_rps_events;
4653 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
4656 /* gen6_set_rps is called to update the frequency request, but should also be
4657 * called when the range (min_delay and max_delay) is modified so that we can
4658 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4659 static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4661 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4662 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
4663 return;
4665 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4666 WARN_ON(val > dev_priv->rps.max_freq);
4667 WARN_ON(val < dev_priv->rps.min_freq);
4669 /* min/max delay may still have been modified so be sure to
4670 * write the limits value.
4671 */
4672 if (val != dev_priv->rps.cur_freq) {
4673 gen6_set_rps_thresholds(dev_priv, val);
4675 if (IS_GEN9(dev_priv))
4676 I915_WRITE(GEN6_RPNSWREQ,
4677 GEN9_FREQUENCY(val));
4678 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4679 I915_WRITE(GEN6_RPNSWREQ,
4680 HSW_FREQUENCY(val));
4681 else
4682 I915_WRITE(GEN6_RPNSWREQ,
4683 GEN6_FREQUENCY(val) |
4684 GEN6_OFFSET(0) |
4685 GEN6_AGGRESSIVE_TURBO);
4686 }
4688 /* Make sure we continue to get interrupts
4689 * until we hit the minimum or maximum frequencies.
4690 */
4691 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4692 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4694 POSTING_READ(GEN6_RPNSWREQ);
4696 dev_priv->rps.cur_freq = val;
4697 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4700 static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
4702 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4703 WARN_ON(val > dev_priv->rps.max_freq);
4704 WARN_ON(val < dev_priv->rps.min_freq);
4706 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
4707 "Odd GPU freq value\n"))
4710 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4712 if (val != dev_priv->rps.cur_freq) {
4713 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4714 if (!IS_CHERRYVIEW(dev_priv))
4715 gen6_set_rps_thresholds(dev_priv, val);
4716 }
4718 dev_priv->rps.cur_freq = val;
4719 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4722 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
4724 * * If Gfx is Idle, then
4725 * 1. Forcewake Media well.
4726 * 2. Request idle freq.
4727 * 3. Release Forcewake of Media well.
4728 */
4729 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4731 u32 val = dev_priv->rps.idle_freq;
4733 if (dev_priv->rps.cur_freq <= val)
4734 return;
4736 /* Wake up the media well, as that takes a lot less
4737 * power than the Render well. */
4738 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4739 valleyview_set_rps(dev_priv, val);
4740 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4743 void gen6_rps_busy(struct drm_i915_private *dev_priv)
4745 mutex_lock(&dev_priv->rps.hw_lock);
4746 if (dev_priv->rps.enabled) {
4747 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4748 gen6_rps_reset_ei(dev_priv);
4749 I915_WRITE(GEN6_PMINTRMSK,
4750 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4751 }
4752 mutex_unlock(&dev_priv->rps.hw_lock);
4755 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4757 mutex_lock(&dev_priv->rps.hw_lock);
4758 if (dev_priv->rps.enabled) {
4759 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4760 vlv_set_rps_idle(dev_priv);
4761 else
4762 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4763 dev_priv->rps.last_adj = 0;
4764 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4765 }
4766 mutex_unlock(&dev_priv->rps.hw_lock);
4768 spin_lock(&dev_priv->rps.client_lock);
4769 while (!list_empty(&dev_priv->rps.clients))
4770 list_del_init(dev_priv->rps.clients.next);
4771 spin_unlock(&dev_priv->rps.client_lock);
4774 void gen6_rps_boost(struct drm_i915_private *dev_priv,
4775 struct intel_rps_client *rps,
4776 unsigned long submitted)
4778 /* This is intentionally racy! We peek at the state here, then
4779 * validate inside the RPS worker.
4780 */
4781 if (!(dev_priv->mm.busy &&
4782 dev_priv->rps.enabled &&
4783 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
4784 return;
4786 /* Force a RPS boost (and don't count it against the client) if
4787 * the GPU is severely congested.
4788 */
4789 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
4790 rps = NULL;
4792 spin_lock(&dev_priv->rps.client_lock);
4793 if (rps == NULL || list_empty(&rps->link)) {
4794 spin_lock_irq(&dev_priv->irq_lock);
4795 if (dev_priv->rps.interrupts_enabled) {
4796 dev_priv->rps.client_boost = true;
4797 queue_work(dev_priv->wq, &dev_priv->rps.work);
4799 spin_unlock_irq(&dev_priv->irq_lock);
4801 if (rps != NULL) {
4802 list_add(&rps->link, &dev_priv->rps.clients);
4803 rps->boosts++;
4804 } else
4805 dev_priv->rps.boosts++;
4806 }
4807 spin_unlock(&dev_priv->rps.client_lock);
4810 void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
4812 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4813 valleyview_set_rps(dev_priv, val);
4814 else
4815 gen6_set_rps(dev_priv, val);
4818 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
4820 I915_WRITE(GEN6_RC_CONTROL, 0);
4821 I915_WRITE(GEN9_PG_ENABLE, 0);
4824 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
4826 I915_WRITE(GEN6_RP_CONTROL, 0);
4829 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
4831 I915_WRITE(GEN6_RC_CONTROL, 0);
4832 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4833 I915_WRITE(GEN6_RP_CONTROL, 0);
4836 static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
4838 I915_WRITE(GEN6_RC_CONTROL, 0);
4841 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
4843 /* We're doing forcewake before disabling RC6,
4844 * which is what the BIOS expects when going into suspend */
4845 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4847 I915_WRITE(GEN6_RC_CONTROL, 0);
4849 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4852 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
4854 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4855 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4856 mode = GEN6_RC_CTL_RC6_ENABLE;
4857 else
4858 mode = 0;
4859 }
4860 if (HAS_RC6p(dev_priv))
4861 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4862 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
4863 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
4864 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
4866 else
4867 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4868 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
4871 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
4873 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4874 bool enable_rc6 = true;
4875 unsigned long rc6_ctx_base;
4877 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
4878 DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
4879 enable_rc6 = false;
4880 }
4882 /*
4883 * The exact context size is not known for BXT, so assume a page size
4884 * for this check.
4885 */
4886 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
4887 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
4888 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
4889 ggtt->stolen_reserved_size))) {
4890 DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
4891 enable_rc6 = false;
4892 }
4894 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
4895 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
4896 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
4897 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
4898 DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
4899 enable_rc6 = false;
4900 }
4902 if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
4903 GEN6_RC_CTL_HW_ENABLE)) &&
4904 ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
4905 !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
4906 DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
4907 enable_rc6 = false;
4908 }
4910 return enable_rc6;
4913 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
4915 /* No RC6 before Ironlake and code is gone for ilk. */
4916 if (INTEL_INFO(dev_priv)->gen < 6)
4917 return 0;
4919 if (!enable_rc6)
4920 return 0;
4922 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
4923 DRM_INFO("RC6 disabled by BIOS\n");
4924 return 0;
4925 }
4927 /* Respect the kernel parameter if it is set */
4928 if (enable_rc6 >= 0) {
4929 int mask;
4931 if (HAS_RC6p(dev_priv))
4932 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4933 INTEL_RC6pp_ENABLE;
4934 else
4935 mask = INTEL_RC6_ENABLE;
4937 if ((enable_rc6 & mask) != enable_rc6)
4938 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4939 enable_rc6 & mask, enable_rc6, mask);
4941 return enable_rc6 & mask;
4942 }
4944 if (IS_IVYBRIDGE(dev_priv))
4945 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4947 return INTEL_RC6_ENABLE;
4950 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
4952 uint32_t rp_state_cap;
4953 u32 ddcc_status = 0;
4954 int ret;
4956 /* All of these values are in units of 50MHz */
4957 dev_priv->rps.cur_freq = 0;
4958 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4959 if (IS_BROXTON(dev_priv)) {
4960 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4961 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4962 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4963 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
4964 } else {
4965 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4966 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4967 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4968 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4969 }
4971 /* hw_max = RP0 until we check for overclocking */
4972 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4974 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4975 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
4976 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4977 ret = sandybridge_pcode_read(dev_priv,
4978 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4979 &ddcc_status);
4980 if (0 == ret)
4981 dev_priv->rps.efficient_freq =
4982 clamp_t(u8,
4983 ((ddcc_status >> 8) & 0xff),
4984 dev_priv->rps.min_freq,
4985 dev_priv->rps.max_freq);
4986 }
4988 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4989 /* Store the frequency values in 16.66 MHz units, which is
4990 the natural hardware unit for SKL */
4991 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
4992 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
4993 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
4994 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
4995 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
4996 }
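/* Illustrative arithmetic: with GEN9_FREQ_SCALER == 3, an RP0 of 24 in
 * 50 MHz units (1200 MHz) becomes 72 in 16.66 MHz units. */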
4998 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5000 /* Preserve min/max settings in case of re-init */
5001 if (dev_priv->rps.max_freq_softlimit == 0)
5002 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5004 if (dev_priv->rps.min_freq_softlimit == 0) {
5005 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5006 dev_priv->rps.min_freq_softlimit =
5007 max_t(int, dev_priv->rps.efficient_freq,
5008 intel_freq_opcode(dev_priv, 450));
5009 else
5010 dev_priv->rps.min_freq_softlimit =
5011 dev_priv->rps.min_freq;
5012 }
5015 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
5016 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
5018 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5020 gen6_init_rps_frequencies(dev_priv);
5022 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
5023 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
5024 /*
5025 * BIOS could leave the Hw Turbo enabled, so need to explicitly
5026 * clear out the Control register just to avoid inconsistency
5027 * with debugfs interface, which will show Turbo as enabled
5028 * only and that is not expected by the User after adding the
5029 * WaGsvDisableTurbo. Apart from this there is no problem even
5030 * if the Turbo is left enabled in the Control register, as the
5031 * Up/Down interrupts would remain masked.
5032 */
5033 gen9_disable_rps(dev_priv);
5034 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5035 return;
5036 }
5038 /* Program defaults and thresholds for RPS*/
5039 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5040 GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
5042 /* 1 second timeout*/
5043 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
5044 GT_INTERVAL_FROM_US(dev_priv, 1000000));
5046 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
5048 /* Leaning on the below call to gen6_set_rps to program/setup the
5049 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
5050 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
5051 dev_priv->rps.power = HIGH_POWER; /* force a reset */
5052 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5054 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5057 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
5059 struct intel_engine_cs *engine;
5060 uint32_t rc6_mask = 0;
5062 /* 1a: Software RC state - RC0 */
5063 I915_WRITE(GEN6_RC_STATE, 0);
5065 /* 1b: Get forcewake during program sequence. Although the driver
5066 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5067 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5069 /* 2a: Disable RC states. */
5070 I915_WRITE(GEN6_RC_CONTROL, 0);
5072 /* 2b: Program RC6 thresholds.*/
5074 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
5075 if (IS_SKYLAKE(dev_priv))
5076 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
5077 else
5078 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
5079 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5080 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5081 for_each_engine(engine, dev_priv)
5082 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5084 if (HAS_GUC(dev_priv))
5085 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
5087 I915_WRITE(GEN6_RC_SLEEP, 0);
5089 /* 2c: Program Coarse Power Gating Policies. */
5090 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
5091 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
5093 /* 3a: Enable RC6 */
5094 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5095 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5096 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
5097 /* WaRsUseTimeoutMode */
5098 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
5099 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
5100 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
5101 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5102 GEN7_RC_CTL_TO_MODE |
5103 rc6_mask);
5104 } else {
5105 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
5106 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5107 GEN6_RC_CTL_EI_MODE(1) |
5108 rc6_mask);
5109 }
5111 /*
5112 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
5113 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
5114 */
5115 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
5116 I915_WRITE(GEN9_PG_ENABLE, 0);
5117 else
5118 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5119 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
5121 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5124 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
5126 struct intel_engine_cs *engine;
5127 uint32_t rc6_mask = 0;
5129 /* 1a: Software RC state - RC0 */
5130 I915_WRITE(GEN6_RC_STATE, 0);
5132 /* 1c & 1d: Get forcewake during program sequence. Although the driver
5133 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5134 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5136 /* 2a: Disable RC states. */
5137 I915_WRITE(GEN6_RC_CONTROL, 0);
5139 /* Initialize rps frequencies */
5140 gen6_init_rps_frequencies(dev_priv);
5142 /* 2b: Program RC6 thresholds.*/
5143 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5144 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5145 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5146 for_each_engine(engine, dev_priv)
5147 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5148 I915_WRITE(GEN6_RC_SLEEP, 0);
5149 if (IS_BROADWELL(dev_priv))
5150 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
5151 else
5152 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
5154 /* 3: Enable RC6 */
5155 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5156 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5157 intel_print_rc6_info(dev_priv, rc6_mask);
5158 if (IS_BROADWELL(dev_priv))
5159 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5160 GEN7_RC_CTL_TO_MODE |
5161 rc6_mask);
5162 else
5163 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5164 GEN6_RC_CTL_EI_MODE(1) |
5165 rc6_mask);
5167 /* 4 Program defaults and thresholds for RPS*/
5168 I915_WRITE(GEN6_RPNSWREQ,
5169 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5170 I915_WRITE(GEN6_RC_VIDEO_FREQ,
5171 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5172 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
5173 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
5175 /* Docs recommend 900MHz, and 300 MHz respectively */
5176 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
5177 dev_priv->rps.max_freq_softlimit << 24 |
5178 dev_priv->rps.min_freq_softlimit << 16);
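/* (For reference, 900 MHz and 300 MHz correspond to 18 and 6 in the
 * 50 MHz units the softlimit fields are kept in here.) */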
5180 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
5181 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
5182 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
5183 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
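/* The EI/threshold values above are in 1.28 us units; e.g. 7600000/128
 * = 59375 units = 76 ms, which is 90% of the 84.48 ms (66000 unit)
 * up EI. */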
5185 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5188 I915_WRITE(GEN6_RP_CONTROL,
5189 GEN6_RP_MEDIA_TURBO |
5190 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5191 GEN6_RP_MEDIA_IS_GFX |
5192 GEN6_RP_ENABLE |
5193 GEN6_RP_UP_BUSY_AVG |
5194 GEN6_RP_DOWN_IDLE_AVG);
5196 /* 6: Ring frequency + overclocking (our driver does this later) */
5198 dev_priv->rps.power = HIGH_POWER; /* force a reset */
5199 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5201 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5204 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5206 struct intel_engine_cs *engine;
5207 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
5208 u32 gtfifodbg;
5209 int rc6_mode;
5210 int ret;
5212 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5214 /* Here begins a magic sequence of register writes to enable
5215 * auto-downclocking.
5217 * Perhaps there might be some value in exposing these to
5218 * userspace...
5219 */
5220 I915_WRITE(GEN6_RC_STATE, 0);
5222 /* Clear the DBG now so we don't confuse earlier errors */
5223 gtfifodbg = I915_READ(GTFIFODBG);
5224 if (gtfifodbg) {
5225 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
5226 I915_WRITE(GTFIFODBG, gtfifodbg);
5227 }
5229 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5231 /* Initialize rps frequencies */
5232 gen6_init_rps_frequencies(dev_priv);
5234 /* disable the counters and set deterministic thresholds */
5235 I915_WRITE(GEN6_RC_CONTROL, 0);
5237 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5238 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5239 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5240 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5241 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5243 for_each_engine(engine, dev_priv)
5244 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5246 I915_WRITE(GEN6_RC_SLEEP, 0);
5247 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5248 if (IS_IVYBRIDGE(dev_priv))
5249 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5251 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
5252 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
5253 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5255 /* Check if we are enabling RC6 */
5256 rc6_mode = intel_enable_rc6();
5257 if (rc6_mode & INTEL_RC6_ENABLE)
5258 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5260 /* We don't use those on Haswell */
5261 if (!IS_HASWELL(dev_priv)) {
5262 if (rc6_mode & INTEL_RC6p_ENABLE)
5263 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5265 if (rc6_mode & INTEL_RC6pp_ENABLE)
5266 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5267 }
5269 intel_print_rc6_info(dev_priv, rc6_mask);
5271 I915_WRITE(GEN6_RC_CONTROL,
5272 rc6_mask |
5273 GEN6_RC_CTL_EI_MODE(1) |
5274 GEN6_RC_CTL_HW_ENABLE);
5276 /* Power down if completely idle for over 50ms */
5277 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
5278 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5280 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
5281 if (ret)
5282 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
5284 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
5285 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
5286 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
5287 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
5288 (pcu_mbox & 0xff) * 50);
5289 dev_priv->rps.max_freq = pcu_mbox & 0xff;
5290 }
5292 dev_priv->rps.power = HIGH_POWER; /* force a reset */
5293 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5295 rc6vids = 0;
5296 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5297 if (IS_GEN6(dev_priv) && ret) {
5298 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5299 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5300 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5301 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5302 rc6vids &= 0xffff00;
5303 rc6vids |= GEN6_ENCODE_RC6_VID(450);
5304 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5305 if (ret)
5306 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5307 }
5309 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5312 static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5314 int min_freq = 15;
5315 unsigned int gpu_freq;
5316 unsigned int max_ia_freq, min_ring_freq;
5317 unsigned int max_gpu_freq, min_gpu_freq;
5318 int scaling_factor = 180;
5319 struct cpufreq_policy *policy;
5321 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5323 policy = cpufreq_cpu_get(0);
5324 if (policy) {
5325 max_ia_freq = policy->cpuinfo.max_freq;
5326 cpufreq_cpu_put(policy);
5327 } else {
5328 /*
5329 * Default to measured freq if none found, PCU will ensure we
5330 * don't go over
5331 */
5332 max_ia_freq = tsc_khz;
5333 }
5335 /* Convert from kHz to MHz */
5336 max_ia_freq /= 1000;
5338 min_ring_freq = I915_READ(DCLK) & 0xf;
5339 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5340 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
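/* e.g. a DCLK field of 3 (3 * 266.6 MHz = 800 MHz DDR) becomes
 * 3 * 8 / 3 = 8 in the 100 MHz units ring_freq is compared in below. */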
5342 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5343 /* Convert GT frequency to 50 MHz units */
5344 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5345 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5346 } else {
5347 min_gpu_freq = dev_priv->rps.min_freq;
5348 max_gpu_freq = dev_priv->rps.max_freq;
5349 }
5351 /*
5352 * For each potential GPU frequency, load a ring frequency we'd like
5353 * to use for memory access. We do this by specifying the IA frequency
5354 * the PCU should use as a reference to determine the ring frequency.
5355 */
5356 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5357 int diff = max_gpu_freq - gpu_freq;
5358 unsigned int ia_freq = 0, ring_freq = 0;
5360 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5361 /*
5362 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5363 * No floor required for ring frequency on SKL.
5364 */
5365 ring_freq = gpu_freq;
5366 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5367 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5368 ring_freq = max(min_ring_freq, gpu_freq);
5369 } else if (IS_HASWELL(dev_priv)) {
5370 ring_freq = mult_frac(gpu_freq, 5, 4);
5371 ring_freq = max(min_ring_freq, ring_freq);
5372 /* leave ia_freq as the default, chosen by cpufreq */
5373 } else {
5374 /* On older processors, there is no separate ring
5375 * clock domain, so in order to boost the bandwidth
5376 * of the ring, we need to upclock the CPU (ia_freq).
5378 * For GPU frequencies less than 750MHz,
5379 * just use the lowest ring freq.
5380 */
5381 if (gpu_freq < min_freq)
5382 ia_freq = 800;
5383 else
5384 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5385 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5386 }
5388 sandybridge_pcode_write(dev_priv,
5389 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
5390 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5391 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5392 gpu_freq);
5393 }
5396 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5398 if (!HAS_CORE_RING_FREQ(dev_priv))
5399 return;
5401 mutex_lock(&dev_priv->rps.hw_lock);
5402 __gen6_update_ring_freq(dev_priv);
5403 mutex_unlock(&dev_priv->rps.hw_lock);
5406 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5408 u32 val, rp0;
5410 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5412 switch (INTEL_INFO(dev_priv)->eu_total) {
5413 case 8:
5414 /* (2 * 4) config */
5415 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5416 break;
5417 case 12:
5418 /* (2 * 6) config */
5419 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5420 break;
5421 case 16:
5422 /* (2 * 8) config */
5423 default:
5424 /* Setting (2 * 8) Min RP0 for any other combination */
5425 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5426 break;
5427 }
5429 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5431 return rp0;
5434 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5436 u32 val, rpe;
5438 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5439 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5441 return rpe;
5444 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5446 u32 val, rp1;
5448 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5449 rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5451 return rp1;
5454 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5456 u32 val, rp1;
5458 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5460 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5462 return rp1;
5465 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5467 u32 val, rp0;
5469 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5471 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5472 /* Clamp to max */
5473 rp0 = min_t(u32, rp0, 0xea);
5475 return rp0;
5478 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5480 u32 val, rpe;
5482 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5483 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5484 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
5485 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5487 return rpe;
5490 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5492 u32 val;
5494 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5495 /*
5496 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5497 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5498 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
5499 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
5500 * to make sure it matches what Punit accepts.
5501 */
5502 return max_t(u32, val, 0xc0);
5505 /* Check that the pctx buffer wasn't moved under us. */
5506 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5508 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5510 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5511 dev_priv->vlv_pctx->stolen->start);
5515 /* Check that the pcbr address is not empty. */
5516 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5518 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5520 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5523 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5525 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5526 unsigned long pctx_paddr, paddr;
5527 u32 pcbr;
5528 int pctx_size = 32*1024;
5530 pcbr = I915_READ(VLV_PCBR);
5531 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5532 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5533 paddr = (dev_priv->mm.stolen_base +
5534 (ggtt->stolen_size - pctx_size));
5536 pctx_paddr = (paddr & (~4095));
5537 I915_WRITE(VLV_PCBR, pctx_paddr);
5538 }
5540 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5543 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5545 struct drm_i915_gem_object *pctx;
5546 unsigned long pctx_paddr;
5547 u32 pcbr;
5548 int pctx_size = 24*1024;
5550 mutex_lock(&dev_priv->dev->struct_mutex);
5552 pcbr = I915_READ(VLV_PCBR);
5553 if (pcbr) {
5554 /* BIOS set it up already, grab the pre-alloc'd space */
5555 int pcbr_offset;
5557 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5558 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
5559 pcbr_offset,
5560 I915_GTT_OFFSET_NONE,
5561 pctx_size);
5562 goto out;
5563 }
5565 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5567 /*
5568 * From the Gunit register HAS:
5569 * The Gfx driver is expected to program this register and ensure
5570 * proper allocation within Gfx stolen memory. For example, this
5571 * register should be programmed such that the PCBR range does not
5572 * overlap with other ranges, such as the frame buffer, protected
5573 * memory, or any other relevant ranges.
5574 */
5575 pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
5576 if (!pctx) {
5577 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5578 goto out;
5579 }
5581 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5582 I915_WRITE(VLV_PCBR, pctx_paddr);
5584 out:
5585 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5586 dev_priv->vlv_pctx = pctx;
5587 mutex_unlock(&dev_priv->dev->struct_mutex);
5590 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5592 if (WARN_ON(!dev_priv->vlv_pctx))
5593 return;
5595 drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
5596 dev_priv->vlv_pctx = NULL;
5599 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5601 dev_priv->rps.gpll_ref_freq =
5602 vlv_get_cck_clock(dev_priv, "GPLL ref",
5603 CCK_GPLL_CLOCK_CONTROL,
5604 dev_priv->czclk_freq);
5606 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
5607 dev_priv->rps.gpll_ref_freq);
5610 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5612 u32 val;
5614 valleyview_setup_pctx(dev_priv);
5616 vlv_init_gpll_ref_freq(dev_priv);
5618 mutex_lock(&dev_priv->rps.hw_lock);
5620 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5621 switch ((val >> 6) & 3) {
5622 case 0:
5623 case 1:
5624 dev_priv->mem_freq = 800;
5625 break;
5626 case 2:
5627 dev_priv->mem_freq = 1066;
5628 break;
5629 case 3:
5630 dev_priv->mem_freq = 1333;
5631 break;
5632 }
5633 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5635 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5636 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5637 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5638 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5639 dev_priv->rps.max_freq);
5641 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5642 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5643 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5644 dev_priv->rps.efficient_freq);
5646 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5647 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5648 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5649 dev_priv->rps.rp1_freq);
5651 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5652 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5653 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5654 dev_priv->rps.min_freq);
5656 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5658 /* Preserve min/max settings in case of re-init */
5659 if (dev_priv->rps.max_freq_softlimit == 0)
5660 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5662 if (dev_priv->rps.min_freq_softlimit == 0)
5663 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5665 mutex_unlock(&dev_priv->rps.hw_lock);
5668 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5670 u32 val;
5672 cherryview_setup_pctx(dev_priv);
5674 vlv_init_gpll_ref_freq(dev_priv);
5676 mutex_lock(&dev_priv->rps.hw_lock);
5678 mutex_lock(&dev_priv->sb_lock);
5679 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5680 mutex_unlock(&dev_priv->sb_lock);
5682 switch ((val >> 2) & 0x7) {
5683 case 3:
5684 dev_priv->mem_freq = 2000;
5685 break;
5686 default:
5687 dev_priv->mem_freq = 1600;
5688 break;
5689 }
5690 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5692 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5693 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5694 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5695 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5696 dev_priv->rps.max_freq);
5698 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5699 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5700 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5701 dev_priv->rps.efficient_freq);
5703 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5704 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5705 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5706 dev_priv->rps.rp1_freq);
5708 /* PUnit validated range is only [RPe, RP0] */
5709 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
5710 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5711 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5712 dev_priv->rps.min_freq);
5714 WARN_ONCE((dev_priv->rps.max_freq |
5715 dev_priv->rps.efficient_freq |
5716 dev_priv->rps.rp1_freq |
5717 dev_priv->rps.min_freq) & 1,
5718 "Odd GPU freq values\n");
5720 dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
5722 /* Preserve min/max settings in case of re-init */
5723 if (dev_priv->rps.max_freq_softlimit == 0)
5724 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5726 if (dev_priv->rps.min_freq_softlimit == 0)
5727 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5729 mutex_unlock(&dev_priv->rps.hw_lock);
5732 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
5734 valleyview_cleanup_pctx(dev_priv);
5737 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
5739 struct intel_engine_cs *engine;
5740 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5742 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5744 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
5745 GT_FIFO_FREE_ENTRIES_CHV);
5746 if (gtfifodbg) {
5747 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5748 gtfifodbg);
5749 I915_WRITE(GTFIFODBG, gtfifodbg);
5750 }
5752 cherryview_check_pctx(dev_priv);
5754 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5755 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5756 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5758 /* Disable RC states. */
5759 I915_WRITE(GEN6_RC_CONTROL, 0);
5761 /* 2a: Program RC6 thresholds.*/
5762 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5763 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5764 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5766 for_each_engine(engine, dev_priv)
5767 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5768 I915_WRITE(GEN6_RC_SLEEP, 0);
5770 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
5771 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
5773 /* allows RC6 residency counter to work */
5774 I915_WRITE(VLV_COUNTER_CONTROL,
5775 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5776 VLV_MEDIA_RC6_COUNT_EN |
5777 VLV_RENDER_RC6_COUNT_EN));
5779 /* For now we assume BIOS is allocating and populating the PCBR */
5780 pcbr = I915_READ(VLV_PCBR);
5782 /* 3: Enable RC6 */
5783 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
5784 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5785 rc6_mode = GEN7_RC_CTL_TO_MODE;
5787 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5789 /* 4 Program defaults and thresholds for RPS*/
5790 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5791 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5792 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5793 I915_WRITE(GEN6_RP_UP_EI, 66000);
5794 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5796 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5799 I915_WRITE(GEN6_RP_CONTROL,
5800 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5801 GEN6_RP_MEDIA_IS_GFX |
5802 GEN6_RP_ENABLE |
5803 GEN6_RP_UP_BUSY_AVG |
5804 GEN6_RP_DOWN_IDLE_AVG);
5806 /* Setting Fixed Bias */
5807 val = VLV_OVERRIDE_EN |
5808 CHV_OVERRIDE_EN |
5809 CHV_BIAS_CPU_50_SOC_50;
5810 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5812 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5814 /* RPS code assumes GPLL is used */
5815 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5817 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5818 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5820 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5821 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5822 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5823 dev_priv->rps.cur_freq);
5825 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5826 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5827 dev_priv->rps.idle_freq);
5829 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5831 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5834 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
5836 struct intel_engine_cs *engine;
5837 u32 gtfifodbg, val, rc6_mode = 0;
5839 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5841 valleyview_check_pctx(dev_priv);
5843 gtfifodbg = I915_READ(GTFIFODBG);
5844 if (gtfifodbg) {
5845 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5846 gtfifodbg);
5847 I915_WRITE(GTFIFODBG, gtfifodbg);
5848 }
5850 /* If VLV, Forcewake all wells, else re-direct to regular path */
5851 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5853 /* Disable RC states. */
5854 I915_WRITE(GEN6_RC_CONTROL, 0);
5856 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5857 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5858 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5859 I915_WRITE(GEN6_RP_UP_EI, 66000);
5860 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5862 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5864 I915_WRITE(GEN6_RP_CONTROL,
5865 GEN6_RP_MEDIA_TURBO |
5866 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5867 GEN6_RP_MEDIA_IS_GFX |
5868 GEN6_RP_ENABLE |
5869 GEN6_RP_UP_BUSY_AVG |
5870 GEN6_RP_DOWN_IDLE_CONT);
5872 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5873 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5874 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5876 for_each_engine(engine, dev_priv)
5877 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5879 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
5881 /* allows RC6 residency counter to work */
5882 I915_WRITE(VLV_COUNTER_CONTROL,
5883 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5884 VLV_RENDER_RC0_COUNT_EN |
5885 VLV_MEDIA_RC6_COUNT_EN |
5886 VLV_RENDER_RC6_COUNT_EN));
5888 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5889 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5891 intel_print_rc6_info(dev_priv, rc6_mode);
5893 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5895 /* Setting Fixed Bias */
5896 val = VLV_OVERRIDE_EN |
5897 VLV_SOC_TDP_EN |
5898 VLV_BIAS_CPU_125_SOC_875;
5899 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5901 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5903 /* RPS code assumes GPLL is used */
5904 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5906 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5907 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5909 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5910 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5911 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5912 dev_priv->rps.cur_freq);
5914 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5915 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5916 dev_priv->rps.idle_freq);
5918 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5920 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5923 static unsigned long intel_pxfreq(u32 vidfreq)
5925 unsigned long freq;
5926 int div = (vidfreq & 0x3f0000) >> 16;
5927 int post = (vidfreq & 0x3000) >> 12;
5928 int pre = (vidfreq & 0x7);
5930 if (!pre)
5931 return 0;
5933 freq = ((div * 133333) / ((1<<post) * pre));
5935 return freq;
5938 static const struct cparams {
5939 u16 i;
5940 u16 t;
5941 u16 m;
5942 u16 c;
5943 } cparams[] = {
5944 { 1, 1333, 301, 28664 },
5945 { 1, 1066, 294, 24460 },
5946 { 1, 800, 294, 25192 },
5947 { 0, 1333, 276, 27605 },
5948 { 0, 1066, 276, 27605 },
5949 { 0, 800, 231, 23784 },
5950 };
5952 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5954 u64 total_count, diff, ret;
5955 u32 count1, count2, count3, m = 0, c = 0;
5956 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5957 int i;
5959 assert_spin_locked(&mchdev_lock);
5961 diff1 = now - dev_priv->ips.last_time1;
5963 /* Prevent division-by-zero if we are asking too fast.
5964 * Also, we don't get interesting results if we are polling
5965 * faster than once in 10ms, so just return the saved value
5966 * in such cases.
5967 */
5968 if (diff1 <= 10)
5969 return dev_priv->ips.chipset_power;
5971 count1 = I915_READ(DMIEC);
5972 count2 = I915_READ(DDREC);
5973 count3 = I915_READ(CSIEC);
5975 total_count = count1 + count2 + count3;
5977 /* FIXME: handle per-counter overflow */
5978 if (total_count < dev_priv->ips.last_count1) {
5979 diff = ~0UL - dev_priv->ips.last_count1;
5980 diff += total_count;
5981 } else {
5982 diff = total_count - dev_priv->ips.last_count1;
5983 }
5985 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
5986 if (cparams[i].i == dev_priv->ips.c_m &&
5987 cparams[i].t == dev_priv->ips.r_t) {
5988 m = cparams[i].m;
5989 c = cparams[i].c;
5990 break;
5991 }
5992 }
5994 diff = div_u64(diff, diff1);
5995 ret = ((m * diff) + c);
5996 ret = div_u64(ret, 10);
5998 dev_priv->ips.last_count1 = total_count;
5999 dev_priv->ips.last_time1 = now;
6001 dev_priv->ips.chipset_power = ret;
6003 return ret;
6006 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
6008 unsigned long val;
6010 if (INTEL_INFO(dev_priv)->gen != 5)
6011 return 0;
6013 spin_lock_irq(&mchdev_lock);
6015 val = __i915_chipset_val(dev_priv);
6017 spin_unlock_irq(&mchdev_lock);
6019 return val;
6022 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
6024 unsigned long m, x, b;
6025 u32 tsfs;
6027 tsfs = I915_READ(TSFS);
6029 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6030 x = I915_READ8(TR1);
6032 b = tsfs & TSFS_INTR_MASK;
6034 return ((m * x) / 127) - b;
6037 static int _pxvid_to_vd(u8 pxvid)
6039 if (pxvid == 0)
6040 return 0;
6042 if (pxvid >= 8 && pxvid < 31)
6043 pxvid = 31;
6045 return (pxvid + 2) * 125;
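/* _pxvid_to_vd() example: PXVID codes 8..30 are clamped to 31 above, so
 * _pxvid_to_vd(16) = (31 + 2) * 125 = 4125, in the units consumed by
 * pvid_to_extvid() below. */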
6048 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
6050 const int vd = _pxvid_to_vd(pxvid);
6051 const int vm = vd - 1125;
6053 if (INTEL_INFO(dev_priv)->is_mobile)
6054 return vm > 0 ? vm : 0;
6056 return vd;
6059 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
6061 u64 now, diff, diffms;
6062 u32 count;
6064 assert_spin_locked(&mchdev_lock);
6066 now = ktime_get_raw_ns();
6067 diffms = now - dev_priv->ips.last_time2;
6068 do_div(diffms, NSEC_PER_MSEC);
6070 /* Don't divide by 0 */
6071 if (!diffms)
6072 return;
6074 count = I915_READ(GFXEC);
6076 if (count < dev_priv->ips.last_count2) {
6077 diff = ~0UL - dev_priv->ips.last_count2;
6078 diff += count;
6079 } else {
6080 diff = count - dev_priv->ips.last_count2;
6081 }
6083 dev_priv->ips.last_count2 = count;
6084 dev_priv->ips.last_time2 = now;
6086 /* More magic constants... */
6087 diff = diff * 1181;
6088 diff = div_u64(diff, diffms * 10);
6089 dev_priv->ips.gfx_power = diff;
6092 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
6094 if (INTEL_INFO(dev_priv)->gen != 5)
6095 return;
6097 spin_lock_irq(&mchdev_lock);
6099 __i915_update_gfx_val(dev_priv);
6101 spin_unlock_irq(&mchdev_lock);
6104 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
6106 unsigned long t, corr, state1, corr2, state2;
6107 u32 pxvid, ext_v;
6109 assert_spin_locked(&mchdev_lock);
6111 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
6112 pxvid = (pxvid >> 24) & 0x7f;
6113 ext_v = pvid_to_extvid(dev_priv, pxvid);
6115 state1 = ext_v;
6117 t = i915_mch_val(dev_priv);
6119 /* Revel in the empirically derived constants */
6121 /* Correction factor in 1/100000 units */
6122 if (t > 80)
6123 corr = ((t * 2349) + 135940);
6124 else if (t >= 50)
6125 corr = ((t * 964) + 29317);
6126 else /* < 50 */
6127 corr = ((t * 301) + 1004);
6129 corr = corr * ((150142 * state1) / 10000 - 78642);
6130 corr /= 100000;
6131 corr2 = (corr * dev_priv->ips.corr);
6133 state2 = (corr2 * state1) / 10000;
6134 state2 /= 100; /* convert to mW */
6136 __i915_update_gfx_val(dev_priv);
	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
6183 EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
6210 EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
6238 EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *engine;
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_engine(engine, dev_priv)
		ret |= !list_empty(&engine->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
6264 EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
6294 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
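/*
 * Editor's sketch (not part of the driver) of the intel_ips side of this
 * dance: IPS resolves the exported symbols at runtime instead of linking
 * against i915. The symbol names are real; the polling helper and the
 * SAMPLE_POWER_BUDGET threshold below are hypothetical.
 */
#if 0
static void ips_sample_gpu_power(void)
{
	unsigned long (*read_mch)(void);
	bool (*gpu_lower)(void);

	read_mch = symbol_get(i915_read_mch_val);
	if (!read_mch)
		return;	/* i915 not loaded (yet) */

	gpu_lower = symbol_get(i915_gpu_lower);
	if (gpu_lower) {
		/* Throttle the GPU when the combined reading is too high */
		if (read_mch() > SAMPLE_POWER_BUDGET)
			gpu_lower();
		symbol_put(i915_gpu_lower);
	}
	symbol_put(i915_read_mch_val);
}
#endif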
6316 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6318 /* We only register the i915 ips part with intel-ips once everything is
6319 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6320 spin_lock_irq(&mchdev_lock);
6321 i915_mch_dev = dev_priv;
6322 spin_unlock_irq(&mchdev_lock);
6324 ips_ping_for_i915_load();
6327 void intel_gpu_ips_teardown(void)
6329 spin_lock_irq(&mchdev_lock);
6330 i915_mch_dev = NULL;
6331 spin_unlock_irq(&mchdev_lock);
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
6345 I915_WRITE(SDEW, 0x15040d00);
6346 I915_WRITE(CSIEW0, 0x007f0000);
6347 I915_WRITE(CSIEW1, 0x1e220004);
6348 I915_WRITE(CSIEW2, 0x04000004);
6350 for (i = 0; i < 5; i++)
6351 I915_WRITE(PEW(i), 0);
6352 for (i = 0; i < 3; i++)
6353 I915_WRITE(DEW(i), 0);
6355 /* Program P-state weights to account for frequency power adjustment */
6356 for (i = 0; i < 16; i++) {
6357 u32 pxvidfreq = I915_READ(PXVFREQ(i));
6358 unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;
6375 for (i = 0; i < 4; i++) {
6376 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6377 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6378 I915_WRITE(PXW(i), val);
6381 /* Adjust magic regs to magic values (more experimental results) */
6382 I915_WRITE(OGW0, 0);
6383 I915_WRITE(OGW1, 0);
6384 I915_WRITE(EG0, 0x00007f00);
6385 I915_WRITE(EG1, 0x0000000e);
6386 I915_WRITE(EG2, 0x000e0000);
6387 I915_WRITE(EG3, 0x68000300);
6388 I915_WRITE(EG4, 0x42000000);
6389 I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);
6396 /* Enable PMON + select events */
6397 I915_WRITE(ECR, 0x80000019);
6399 lcfuse = I915_READ(LCFUSE02);
6401 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}
6415 if (IS_CHERRYVIEW(dev_priv))
6416 cherryview_init_gt_powersave(dev_priv);
6417 else if (IS_VALLEYVIEW(dev_priv))
6418 valleyview_init_gt_powersave(dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_CHERRYVIEW(dev_priv))
		return;
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);
6426 valleyview_cleanup_gt_powersave(dev_priv);
6428 if (!i915.enable_rc6)
6429 intel_runtime_pm_put(dev_priv);
6432 static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
6434 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6436 gen6_disable_rps_interrupts(dev_priv);
/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	gen6_suspend_rps(dev_priv);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}
6458 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6460 if (IS_IRONLAKE_M(dev_priv)) {
6461 ironlake_disable_drps(dev_priv);
6462 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6463 intel_suspend_gt_powersave(dev_priv);
6465 mutex_lock(&dev_priv->rps.hw_lock);
6466 if (INTEL_INFO(dev_priv)->gen >= 9) {
6467 gen9_disable_rc6(dev_priv);
6468 gen9_disable_rps(dev_priv);
6469 } else if (IS_CHERRYVIEW(dev_priv))
6470 cherryview_disable_rps(dev_priv);
6471 else if (IS_VALLEYVIEW(dev_priv))
			valleyview_disable_rps(dev_priv);
		else
			gen6_disable_rps(dev_priv);
6476 dev_priv->rps.enabled = false;
6477 mutex_unlock(&dev_priv->rps.hw_lock);
6481 static void intel_gen6_powersave_work(struct work_struct *work)
6483 struct drm_i915_private *dev_priv =
6484 container_of(work, struct drm_i915_private,
6485 rps.delayed_resume_work.work);
6487 mutex_lock(&dev_priv->rps.hw_lock);
6489 gen6_reset_rps_interrupts(dev_priv);
6491 if (IS_CHERRYVIEW(dev_priv)) {
6492 cherryview_enable_rps(dev_priv);
6493 } else if (IS_VALLEYVIEW(dev_priv)) {
6494 valleyview_enable_rps(dev_priv);
6495 } else if (INTEL_INFO(dev_priv)->gen >= 9) {
6496 gen9_enable_rc6(dev_priv);
6497 gen9_enable_rps(dev_priv);
6498 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6499 __gen6_update_ring_freq(dev_priv);
6500 } else if (IS_BROADWELL(dev_priv)) {
6501 gen8_enable_rps(dev_priv);
6502 __gen6_update_ring_freq(dev_priv);
	} else {
		gen6_enable_rps(dev_priv);
		__gen6_update_ring_freq(dev_priv);
	}
6508 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6509 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6511 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6512 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6514 dev_priv->rps.enabled = true;
6516 gen6_enable_rps_interrupts(dev_priv);
6518 mutex_unlock(&dev_priv->rps.hw_lock);
6520 intel_runtime_pm_put(dev_priv);
6523 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
6530 ironlake_enable_drps(dev_priv);
6531 mutex_lock(&dev_priv->dev->struct_mutex);
6532 intel_init_emon(dev_priv);
6533 mutex_unlock(&dev_priv->dev->struct_mutex);
6534 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
6547 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
6548 round_jiffies_up_relative(HZ)))
6549 intel_runtime_pm_get_noresume(dev_priv);
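		/*
		 * Editor's note: this reference is paired with the
		 * intel_runtime_pm_put() at the end of
		 * intel_gen6_powersave_work(), which runs once RPS/RC6 setup
		 * completes and runtime PM can be allowed again.
		 */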
void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	gen6_suspend_rps(dev_priv);
	dev_priv->rps.enabled = false;
}
6562 static void ibx_init_clock_gating(struct drm_device *dev)
6564 struct drm_i915_private *dev_priv = dev->dev_private;
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
6574 static void g4x_disable_trickle_feed(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
6580 I915_WRITE(DSPCNTR(pipe),
6581 I915_READ(DSPCNTR(pipe)) |
6582 DISPPLANE_TRICKLE_FEED_DISABLE);
6584 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6585 POSTING_READ(DSPSURF(pipe));
6589 static void ilk_init_lp_watermarks(struct drm_device *dev)
6591 struct drm_i915_private *dev_priv = dev->dev_private;
6593 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6594 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6595 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
6603 static void ironlake_init_clock_gating(struct drm_device *dev)
6605 struct drm_i915_private *dev_priv = dev->dev_private;
6606 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6613 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6614 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6616 I915_WRITE(PCH_3DCGDIS0,
6617 MARIUNIT_CLOCK_GATE_DISABLE |
6618 SVSMUNIT_CLOCK_GATE_DISABLE);
6619 I915_WRITE(PCH_3DCGDIS1,
6620 VFMUNIT_CLOCK_GATE_DISABLE);
	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
6629 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6630 (I915_READ(ILK_DISPLAY_CHICKEN2) |
6631 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
6632 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
6637 ilk_init_lp_watermarks(dev);
	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
6646 if (IS_IRONLAKE_M(dev)) {
6647 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}
6656 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6658 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6659 I915_READ(ILK_DISPLAY_CHICKEN2) |
6660 ILK_ELPIN_409_SELECT);
6661 I915_WRITE(_3D_CHICKEN2,
6662 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6663 _3D_CHICKEN2_WM_READ_PIPELINED);
6665 /* WaDisableRenderCachePipelinedFlush:ilk */
6666 I915_WRITE(CACHE_MODE_0,
6667 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6669 /* WaDisable_RenderCache_OperationalFlush:ilk */
6670 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6672 g4x_disable_trickle_feed(dev);
6674 ibx_init_clock_gating(dev);
6677 static void cpt_init_clock_gating(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
6688 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6689 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6690 PCH_CPUNIT_CLOCK_GATE_DISABLE);
6691 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6692 DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
6696 for_each_pipe(dev_priv, pipe) {
6697 val = I915_READ(TRANS_CHICKEN2(pipe));
6698 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6699 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6700 if (dev_priv->vbt.fdi_rx_polarity_inverted)
6701 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6702 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
6703 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6704 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
6705 I915_WRITE(TRANS_CHICKEN2(pipe), val);
6707 /* WADP0ClockGatingDisable */
6708 for_each_pipe(dev_priv, pipe) {
6709 I915_WRITE(TRANS_CHICKEN1(pipe),
6710 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6714 static void gen6_check_mch_setup(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
6725 static void gen6_init_clock_gating(struct drm_device *dev)
6727 struct drm_i915_private *dev_priv = dev->dev_private;
6728 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6730 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6732 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6733 I915_READ(ILK_DISPLAY_CHICKEN2) |
6734 ILK_ELPIN_409_SELECT);
6736 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
6737 I915_WRITE(_3D_CHICKEN,
6738 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6740 /* WaDisable_RenderCache_OperationalFlush:snb */
6741 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
6751 I915_WRITE(GEN6_GT_MODE,
6752 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6754 ilk_init_lp_watermarks(dev);
6756 I915_WRITE(CACHE_MODE_0,
6757 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6759 I915_WRITE(GEN6_UCGCTL1,
6760 I915_READ(GEN6_UCGCTL1) |
6761 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6762 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6764 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
6765 * gating disable must be set. Failure to set it results in
6766 * flickering pixels due to Z write ordering failures after
6767 * some amount of runtime in the Mesa "fire" demo, and Unigine
6768 * Sanctuary and Tropics, and apparently anything else with
6769 * alpha test or pixel discard.
6771 * According to the spec, bit 11 (RCCUNIT) must also be set,
6772 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
6777 I915_WRITE(GEN6_UCGCTL2,
6778 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6779 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6781 /* WaStripsFansDisableFastClipPerformanceFix:snb */
6782 I915_WRITE(_3D_CHICKEN3,
6783 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
6790 I915_WRITE(_3D_CHICKEN3,
6791 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
6804 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6805 I915_READ(ILK_DISPLAY_CHICKEN1) |
6806 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6807 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6808 I915_READ(ILK_DISPLAY_CHICKEN2) |
6809 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
6810 I915_WRITE(ILK_DSPCLK_GATE_D,
6811 I915_READ(ILK_DSPCLK_GATE_D) |
6812 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
6813 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6815 g4x_disable_trickle_feed(dev);
6817 cpt_init_clock_gating(dev);
6819 gen6_check_mch_setup(dev);
6822 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6824 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
6832 reg &= ~GEN7_FF_SCHED_MASK;
6833 reg |= GEN7_FF_TS_SCHED_HW;
6834 reg |= GEN7_FF_VS_SCHED_HW;
6835 reg |= GEN7_FF_DS_SCHED_HW;
6837 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
6840 static void lpt_init_clock_gating(struct drm_device *dev)
6842 struct drm_i915_private *dev_priv = dev->dev_private;
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
6848 if (HAS_PCH_LPT_LP(dev))
6849 I915_WRITE(SOUTH_DSPCLK_GATE_D,
6850 I915_READ(SOUTH_DSPCLK_GATE_D) |
6851 PCH_LP_PARTITION_LEVEL_DISABLE);
6853 /* WADPOClockGatingDisable:hsw */
6854 I915_WRITE(TRANS_CHICKEN1(PIPE_A),
6855 I915_READ(TRANS_CHICKEN1(PIPE_A)) |
6856 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6859 static void lpt_suspend_hw(struct drm_device *dev)
6861 struct drm_i915_private *dev_priv = dev->dev_private;
6863 if (HAS_PCH_LPT_LP(dev)) {
6864 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
6866 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6867 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6871 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
6872 int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6881 I915_WRITE(GEN8_L3SQCREG1,
6882 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
6883 L3_HIGH_PRIO_CREDITS(high_prio_credits));
	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
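/*
 * Editor's note: the per-platform credit splits live at the call sites;
 * broadwell_init_clock_gating() below passes 30 general / 2 high priority
 * credits and cherryview_init_clock_gating() passes 38 / 2, so the
 * DOP-clock-gating disable/re-enable dance stays in this one helper.
 */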
6894 static void skylake_init_clock_gating(struct drm_device *dev)
6896 struct drm_i915_private *dev_priv = dev->dev_private;
6898 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,kbl */
6899 I915_WRITE(CHICKEN_PAR1_1,
6900 I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
6903 static void broadwell_init_clock_gating(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	ilk_init_lp_watermarks(dev);
6910 /* WaSwitchSolVfFArbitrationPriority:bdw */
6911 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6913 /* WaPsrDPAMaskVBlankInSRD:bdw */
6914 I915_WRITE(CHICKEN_PAR1_1,
6915 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
6917 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
6918 for_each_pipe(dev_priv, pipe) {
6919 I915_WRITE(CHICKEN_PIPESL_1(pipe),
6920 I915_READ(CHICKEN_PIPESL_1(pipe)) |
6921 BDW_DPRS_MASK_VBLANK_SRD);
6924 /* WaVSRefCountFullforceMissDisable:bdw */
6925 /* WaDSRefCountFullforceMissDisable:bdw */
6926 I915_WRITE(GEN7_FF_THREAD_MODE,
6927 I915_READ(GEN7_FF_THREAD_MODE) &
6928 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
6930 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
6931 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
6933 /* WaDisableSDEUnitClockGating:bdw */
6934 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6935 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6937 /* WaProgramL3SqcReg1Default:bdw */
6938 gen8_set_l3sqc_credits(dev_priv, 30, 2);
	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
6945 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
6947 lpt_init_clock_gating(dev);
6950 static void haswell_init_clock_gating(struct drm_device *dev)
6952 struct drm_i915_private *dev_priv = dev->dev_private;
6954 ilk_init_lp_watermarks(dev);
6956 /* L3 caching of data atomics doesn't work -- disable it. */
6957 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
6958 I915_WRITE(HSW_ROW_CHICKEN3,
6959 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
6961 /* This is required by WaCatErrorRejectionIssue:hsw */
6962 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
6963 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
6964 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
6966 /* WaVSRefCountFullforceMissDisable:hsw */
6967 I915_WRITE(GEN7_FF_THREAD_MODE,
6968 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
6970 /* WaDisable_RenderCache_OperationalFlush:hsw */
6971 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6973 /* enable HiZ Raw Stall Optimization */
6974 I915_WRITE(CACHE_MODE_0_GEN7,
6975 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
6977 /* WaDisable4x2SubspanOptimization:hsw */
6978 I915_WRITE(CACHE_MODE_1,
6979 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
6989 I915_WRITE(GEN7_GT_MODE,
6990 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6992 /* WaSampleCChickenBitEnable:hsw */
6993 I915_WRITE(HALF_SLICE_CHICKEN3,
6994 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
6996 /* WaSwitchSolVfFArbitrationPriority:hsw */
6997 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
6999 /* WaRsPkgCStateDisplayPMReq:hsw */
7000 I915_WRITE(CHICKEN_PAR1_1,
7001 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
7003 lpt_init_clock_gating(dev);
7006 static void ivybridge_init_clock_gating(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);
7013 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7015 /* WaDisableEarlyCull:ivb */
7016 I915_WRITE(_3D_CHICKEN3,
7017 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7019 /* WaDisableBackToBackFlipFix:ivb */
7020 I915_WRITE(IVB_CHICKEN3,
7021 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7022 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7024 /* WaDisablePSDDualDispatchEnable:ivb */
7025 if (IS_IVB_GT1(dev))
7026 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7027 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7029 /* WaDisable_RenderCache_OperationalFlush:ivb */
7030 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7032 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
7033 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
7034 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
7036 /* WaApplyL3ControlAndL3ChickenMode:ivb */
7037 I915_WRITE(GEN7_L3CNTLREG1,
7038 GEN7_WA_FOR_GEN7_L3_CONTROL);
7039 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
7040 GEN7_WA_L3_CHICKEN_MODE);
7041 if (IS_IVB_GT1(dev))
7042 I915_WRITE(GEN7_ROW_CHICKEN2,
7043 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}
7052 /* WaForceL3Serialization:ivb */
7053 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7054 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
7060 I915_WRITE(GEN6_UCGCTL2,
7061 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7063 /* This is required by WaCatErrorRejectionIssue:ivb */
7064 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7065 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7066 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7068 g4x_disable_trickle_feed(dev);
7070 gen7_setup_fixed_func_scheduler(dev_priv);
	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}
7078 /* WaDisable4x2SubspanOptimization:ivb */
7079 I915_WRITE(CACHE_MODE_1,
7080 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
7090 I915_WRITE(GEN7_GT_MODE,
7091 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7093 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7094 snpcr &= ~GEN6_MBC_SNPCR_MASK;
7095 snpcr |= GEN6_MBC_SNPCR_MED;
7096 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7098 if (!HAS_PCH_NOP(dev))
7099 cpt_init_clock_gating(dev);
7101 gen6_check_mch_setup(dev);
7104 static void valleyview_init_clock_gating(struct drm_device *dev)
7106 struct drm_i915_private *dev_priv = dev->dev_private;
7108 /* WaDisableEarlyCull:vlv */
7109 I915_WRITE(_3D_CHICKEN3,
7110 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7112 /* WaDisableBackToBackFlipFix:vlv */
7113 I915_WRITE(IVB_CHICKEN3,
7114 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7115 CHICKEN3_DGMG_DONE_FIX_DISABLE);
7117 /* WaPsdDispatchEnable:vlv */
7118 /* WaDisablePSDDualDispatchEnable:vlv */
7119 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7120 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
7121 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7123 /* WaDisable_RenderCache_OperationalFlush:vlv */
7124 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7126 /* WaForceL3Serialization:vlv */
7127 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7128 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7130 /* WaDisableDopClockGating:vlv */
7131 I915_WRITE(GEN7_ROW_CHICKEN2,
7132 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7134 /* This is required by WaCatErrorRejectionIssue:vlv */
7135 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7136 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7137 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7139 gen7_setup_fixed_func_scheduler(dev_priv);
	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
7145 I915_WRITE(GEN6_UCGCTL2,
7146 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7148 /* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
7150 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
7151 I915_WRITE(GEN7_UCGCTL4,
7152 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
7158 I915_WRITE(CACHE_MODE_1,
7159 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
7169 I915_WRITE(GEN7_GT_MODE,
7170 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
7176 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
7186 static void cherryview_init_clock_gating(struct drm_device *dev)
7188 struct drm_i915_private *dev_priv = dev->dev_private;
7190 /* WaVSRefCountFullforceMissDisable:chv */
7191 /* WaDSRefCountFullforceMissDisable:chv */
7192 I915_WRITE(GEN7_FF_THREAD_MODE,
7193 I915_READ(GEN7_FF_THREAD_MODE) &
7194 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7196 /* WaDisableSemaphoreAndSyncFlipWait:chv */
7197 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7198 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7200 /* WaDisableCSUnitClockGating:chv */
7201 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7202 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7204 /* WaDisableSDEUnitClockGating:chv */
7205 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7206 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
7213 gen8_set_l3sqc_credits(dev_priv, 38, 2);
	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
7222 static void g4x_init_clock_gating(struct drm_device *dev)
7224 struct drm_i915_private *dev_priv = dev->dev_private;
7225 uint32_t dspclk_gate;
7227 I915_WRITE(RENCLK_GATE_D1, 0);
7228 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7229 GS_UNIT_CLOCK_GATE_DISABLE |
7230 CL_UNIT_CLOCK_GATE_DISABLE);
7231 I915_WRITE(RAMCLK_GATE_D, 0);
7232 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7233 OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7239 /* WaDisableRenderCachePipelinedFlush */
7240 I915_WRITE(CACHE_MODE_0,
7241 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7243 /* WaDisable_RenderCache_OperationalFlush:g4x */
7244 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7246 g4x_disable_trickle_feed(dev);
7249 static void crestline_init_clock_gating(struct drm_device *dev)
7251 struct drm_i915_private *dev_priv = dev->dev_private;
7253 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7254 I915_WRITE(RENCLK_GATE_D2, 0);
7255 I915_WRITE(DSPCLK_GATE_D, 0);
7256 I915_WRITE(RAMCLK_GATE_D, 0);
7257 I915_WRITE16(DEUC, 0);
7258 I915_WRITE(MI_ARB_STATE,
7259 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7261 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7262 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7265 static void broadwater_init_clock_gating(struct drm_device *dev)
7267 struct drm_i915_private *dev_priv = dev->dev_private;
7269 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7270 I965_RCC_CLOCK_GATE_DISABLE |
7271 I965_RCPB_CLOCK_GATE_DISABLE |
7272 I965_ISC_CLOCK_GATE_DISABLE |
7273 I965_FBC_CLOCK_GATE_DISABLE);
7274 I915_WRITE(RENCLK_GATE_D2, 0);
7275 I915_WRITE(MI_ARB_STATE,
7276 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7278 /* WaDisable_RenderCache_OperationalFlush:gen4 */
7279 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7282 static void gen3_init_clock_gating(struct drm_device *dev)
7284 struct drm_i915_private *dev_priv = dev->dev_private;
7285 u32 dstate = I915_READ(D_STATE);
7287 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7288 DSTATE_DOT_CLOCK_GATING;
7289 I915_WRITE(D_STATE, dstate);
7291 if (IS_PINEVIEW(dev))
7292 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7294 /* IIR "flip pending" means done if this bit is set */
7295 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7297 /* interrupts should cause a wake up from C3 */
7298 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7300 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7301 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7303 I915_WRITE(MI_ARB_STATE,
7304 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7307 static void i85x_init_clock_gating(struct drm_device *dev)
7309 struct drm_i915_private *dev_priv = dev->dev_private;
7311 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7313 /* interrupts should cause a wake up from C3 */
7314 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7315 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7317 I915_WRITE(MEM_MODE,
7318 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7321 static void i830_init_clock_gating(struct drm_device *dev)
7323 struct drm_i915_private *dev_priv = dev->dev_private;
7325 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7327 I915_WRITE(MEM_MODE,
7328 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7329 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7332 void intel_init_clock_gating(struct drm_device *dev)
7334 struct drm_i915_private *dev_priv = dev->dev_private;
7336 dev_priv->display.init_clock_gating(dev);
7339 void intel_suspend_hw(struct drm_device *dev)
7341 if (HAS_PCH_LPT(dev))
7342 lpt_suspend_hw(dev);
7345 static void nop_init_clock_gating(struct drm_device *dev)
7347 DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
7351 * intel_init_clock_gating_hooks - setup the clock gating hooks
7352 * @dev_priv: device private
7354 * Setup the hooks that configure which clocks of a given platform can be
7355 * gated and also apply various GT and display specific workarounds for these
7356 * platforms. Note that some GT specific workarounds are applied separately
7357 * when GPU contexts or batchbuffers start their execution.
7359 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7361 if (IS_SKYLAKE(dev_priv))
7362 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7363 else if (IS_KABYLAKE(dev_priv))
7364 dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7365 else if (IS_BROXTON(dev_priv))
7366 dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7367 else if (IS_BROADWELL(dev_priv))
7368 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7369 else if (IS_CHERRYVIEW(dev_priv))
7370 dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
7371 else if (IS_HASWELL(dev_priv))
7372 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
7373 else if (IS_IVYBRIDGE(dev_priv))
7374 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7375 else if (IS_VALLEYVIEW(dev_priv))
7376 dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
7377 else if (IS_GEN6(dev_priv))
7378 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7379 else if (IS_GEN5(dev_priv))
7380 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7381 else if (IS_G4X(dev_priv))
7382 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7383 else if (IS_CRESTLINE(dev_priv))
7384 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7385 else if (IS_BROADWATER(dev_priv))
7386 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7387 else if (IS_GEN3(dev_priv))
7388 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7389 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7390 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7391 else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
7399 /* Set up chip specific power management-related functions */
7400 void intel_init_pm(struct drm_device *dev)
7402 struct drm_i915_private *dev_priv = dev->dev_private;
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
7408 i915_pineview_get_mem_freq(dev);
7409 else if (IS_GEN5(dev))
7410 i915_ironlake_get_mem_freq(dev);
7412 /* For FIFO watermark updates */
7413 if (INTEL_INFO(dev)->gen >= 9) {
7414 skl_setup_wm_latency(dev);
7415 dev_priv->display.update_wm = skl_update_wm;
7416 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7417 } else if (HAS_PCH_SPLIT(dev)) {
7418 ilk_setup_wm_latency(dev);
7420 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
7421 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7422 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7423 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7424 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7425 dev_priv->display.compute_intermediate_wm =
7426 ilk_compute_intermediate_wm;
7427 dev_priv->display.initial_watermarks =
7428 ilk_initial_watermarks;
7429 dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_CHERRYVIEW(dev)) {
7436 vlv_setup_wm_latency(dev);
7437 dev_priv->display.update_wm = vlv_update_wm;
7438 } else if (IS_VALLEYVIEW(dev)) {
7439 vlv_setup_wm_latency(dev);
7440 dev_priv->display.update_wm = vlv_update_wm;
7441 } else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
7451 /* Disable CxSR and never update its watermark again */
7452 intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
7456 } else if (IS_G4X(dev)) {
7457 dev_priv->display.update_wm = g4x_update_wm;
7458 } else if (IS_GEN4(dev)) {
7459 dev_priv->display.update_wm = i965_update_wm;
7460 } else if (IS_GEN3(dev)) {
7461 dev_priv->display.update_wm = i9xx_update_wm;
7462 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7463 } else if (IS_GEN2(dev)) {
7464 if (INTEL_INFO(dev)->num_pipes == 1) {
7465 dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
7476 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7478 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7480 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
7486 I915_WRITE(GEN6_PCODE_DATA1, 0);
7487 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
7501 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
7503 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7505 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
7511 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
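/*
 * Editor's sketch of a pcode round-trip (GEN6_READ_OC_PARAMS is a mailbox the
 * driver uses when enabling RPS; the wrapper itself is hypothetical). Both
 * helpers above assert rps.hw_lock, so the caller must take it:
 */
#if 0
static int example_read_oc_params(struct drm_i915_private *dev_priv, u32 *params)
{
	int ret;

	*params = 0;	/* sandybridge_pcode_read() sends *val as the request */

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, params);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}
#endif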
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}
7533 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7535 return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
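/*
 * Editor's arithmetic sketch with an assumed reference clock: if
 * rps.gpll_ref_freq were 27000 (kHz), the opcode val = 0xc0 gives
 * N = 0xc0 - 0xb7 = 9, so byt_gpu_freq() returns
 * DIV_ROUND_CLOSEST(27000 * 9, 1000) = 243 (MHz), and byt_freq_opcode()
 * inverts it: DIV_ROUND_CLOSEST(1000 * 243, 27000) + 0xb7 = 0xc0.
 */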
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}
7547 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7549 /* CHV needs even values */
7550 return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
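/*
 * Editor's note on the rounding above: dividing by 2 * gpll_ref_freq and then
 * doubling the rounded quotient guarantees an even opcode, as CHV requires.
 * With an assumed gpll_ref_freq of 19200 (kHz), val = 300 (MHz) gives
 * DIV_ROUND_CLOSEST(600000, 19200) = 31, so the returned opcode is 62.
 */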
7553 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
7559 return chv_gpu_freq(dev_priv, val);
7560 else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}
7566 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
7568 if (IS_GEN9(dev_priv))
7569 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
7570 GT_FREQUENCY_MULTIPLIER);
7571 else if (IS_CHERRYVIEW(dev_priv))
7572 return chv_freq_opcode(dev_priv, val);
7573 else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
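/*
 * Editor's usage sketch: the two helpers above are inverses up to rounding,
 * which is what frequency interfaces built on them rely on, e.g.:
 *
 *	int opcode = intel_freq_opcode(dev_priv, 450);
 *	int mhz = intel_gpu_freq(dev_priv, opcode);	// ~450 MHz back
 */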
struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};
7584 static void __intel_rps_boost_work(struct work_struct *work)
7586 struct request_boost *boost = container_of(work, struct request_boost, work);
7587 struct drm_i915_gem_request *req = boost->req;
7589 if (!i915_gem_request_completed(req, true))
7590 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
	i915_gem_request_unreference(req);
	kfree(boost);
}
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req, true))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}
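/*
 * Editor's usage sketch: wait paths queue a boost for a request that has not
 * completed yet so the GPU clocks up while the CPU blocks, e.g. (hypothetical
 * caller):
 *
 *	if (!i915_gem_request_completed(req, true))
 *		intel_queue_rps_boost_for_request(req);
 *
 * The work item always drops its request reference (and frees itself) in
 * __intel_rps_boost_work(), whether or not a boost was still needed.
 */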
7617 void intel_pm_setup(struct drm_device *dev)
7619 struct drm_i915_private *dev_priv = dev->dev_private;
7621 mutex_init(&dev_priv->rps.hw_lock);
7622 spin_lock_init(&dev_priv->rps.client_lock);
7624 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
7625 intel_gen6_powersave_work);
7626 INIT_LIST_HEAD(&dev_priv->rps.clients);
7627 INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
7628 INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
	atomic_set(&dev_priv->pm.atomic_seq, 0);
}