drm/i915: Define HSW/BDW display power domains the right way up
[cascardo/linux.git] / drivers / gpu / drm / i915 / intel_runtime_pm.c
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51
/*
 * Iterate over the power wells of @power_domains in array order (lowest
 * index first), visiting only those wells whose ->domains mask intersects
 * @domain_mask.
 */
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))
58
/*
 * Like for_each_power_well(), but iterates in reverse array order (highest
 * index first); used where wells must be checked/disabled top-down.
 */
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		for_each_if ((power_well)->domains & (domain_mask))
64
65 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
66                                     int power_well_id);
67
/**
 * intel_display_power_domain_str - map a power domain to its name
 * @domain: power domain to stringify
 *
 * Returns a static, human-readable string for @domain, for use in debug
 * and error messages. An unknown value triggers MISSING_CASE() and the
 * function returns "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
137
/*
 * Enable @power_well via its platform-specific ops and mark it enabled in
 * software state. hw_enabled is set only after the enable hook returns, so
 * readers never see the flag set while the well is still powering up.
 */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}
145
/*
 * Disable @power_well via its platform-specific ops. hw_enabled is cleared
 * *before* the disable hook runs - the mirror image of
 * intel_power_well_enable() - so the flag is never set while the well may
 * be off.
 */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}
153
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled. Both bits live in HSW_PWR_WELL_DRIVER, hence the single
 * equality test against the combined mask.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
165
/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	/* With the whole device runtime-suspended nothing is powered. */
	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	/*
	 * The domain is enabled only if every (non-always-on) well it maps
	 * to is enabled; walk top-down and bail on the first disabled one.
	 */
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
205
206 /**
207  * intel_display_power_is_enabled - check for a power domain
208  * @dev_priv: i915 device instance
209  * @domain: power domain to check
210  *
211  * This function can be used to check the hw power domain state. It is mostly
212  * used in hardware state readout functions. Everywhere else code should rely
213  * upon explicit power domain reference counting to ensure that the hardware
214  * block is powered up before accessing it.
215  *
216  * Callers must hold the relevant modesetting locks to ensure that concurrent
217  * threads can't disable the power well while the caller tries to read a few
218  * registers.
219  *
220  * Returns:
221  * True when the power domain is enabled, false otherwise.
222  */
223 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
224                                     enum intel_display_power_domain domain)
225 {
226         struct i915_power_domains *power_domains;
227         bool ret;
228
229         power_domains = &dev_priv->power_domains;
230
231         mutex_lock(&power_domains->lock);
232         ret = __intel_display_power_is_enabled(dev_priv, domain);
233         mutex_unlock(&power_domains->lock);
234
235         return ret;
236 }
237
238 /**
239  * intel_display_set_init_power - set the initial power domain state
240  * @dev_priv: i915 device instance
241  * @enable: whether to enable or disable the initial power domain state
242  *
243  * For simplicity our driver load/unload and system suspend/resume code assumes
244  * that all power domains are always enabled. This functions controls the state
245  * of this little hack. While the initial power domain state is enabled runtime
246  * pm is effectively disabled.
247  */
248 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
249                                   bool enable)
250 {
251         if (dev_priv->power_domains.init_power_on == enable)
252                 return;
253
254         if (enable)
255                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
256         else
257                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
258
259         dev_priv->power_domains.init_power_on = enable;
260 }
261
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure to touch the VGA MSR
	 * register, so that vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	/* Pipe B/C interrupts were lost while the well was off; rearm them. */
	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
290
/*
 * Counterpart of hsw_power_well_post_enable(): quiesce pipe B/C interrupts
 * before the power well backing them is turned off (BDW only).
 */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
297
/*
 * SKL variant of hsw_power_well_post_enable(); only power well 2 covers
 * the VGA and pipe B/C resources that need fixing up after enable.
 */
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure to touch the VGA MSR
	 * register, so that vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}
322
/*
 * Counterpart of skl_power_well_post_enable(): quiesce pipe B/C interrupts
 * before power well 2 is turned off.
 */
static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->data == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}
330
/*
 * Set the HSW/BDW display power well on or off through the driver's
 * request register. On enable we wait (up to 20ms) for the hardware to
 * report the well as up before running the post-enable fixups; on disable
 * we only drop our request - the well actually powers down only once no
 * other requester (BIOS/KVMR/debug) wants it.
 */
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			/* IRQs must be quiesced before the request is dropped. */
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}
363
/*
 * Power domain masks per power well. Each well's mask lists every abstract
 * power domain it backs; POWER_DOMAIN_INIT is included in all of them so
 * that the driver-load/resume "init power" reference keeps every well on.
 */
/* SKL power well 2: pipes B/C, all transcoders, DDI B-E lanes, AUX, audio, VGA. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
/* Per-DDI IO power wells on SKL (A/E share one well). */
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
/* Domains that require DC states (DC5/DC6) to be off on SKL. */
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))

/* BXT power well 2: like SKL but no DDI D/E and with GMBUS. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
/* Domains that require DC states to be off on BXT. */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
422
/*
 * Sanity-check (WARN, don't fail) the preconditions for entering DC9:
 * DC9 not already requested, DC5 disabled, driver power well request
 * cleared and interrupts off.
 */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
441
/*
 * Sanity-check (WARN, don't fail) the preconditions for leaving DC9:
 * interrupts still off and DC5 not enabled underneath us.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
457
458 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
459                                 u32 state)
460 {
461         int rewrites = 0;
462         int rereads = 0;
463         u32 v;
464
465         I915_WRITE(DC_STATE_EN, state);
466
467         /* It has been observed that disabling the dc6 state sometimes
468          * doesn't stick and dmc keeps returning old value. Make sure
469          * the write really sticks enough times and also force rewrite until
470          * we are confident that state is exactly what we want.
471          */
472         do  {
473                 v = I915_READ(DC_STATE_EN);
474
475                 if (v != state) {
476                         I915_WRITE(DC_STATE_EN, state);
477                         rewrites++;
478                         rereads = 0;
479                 } else if (rereads++ > 5) {
480                         break;
481                 }
482
483         } while (rewrites < 100);
484
485         if (v != state)
486                 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
487                           state, v);
488
489         /* Most of the times we need one retry, avoid spam */
490         if (rewrites > 1)
491                 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
492                               state, rewrites);
493 }
494
/*
 * Program the DC state enable bits in DC_STATE_EN, touching only the bits
 * relevant for the platform (DC9 exists on BXT, DC6 otherwise). @state is
 * clamped to the DC states allowed for this system, a mismatch between our
 * cached state and the hardware (i.e. DMC ignoring us) is reported, and
 * the final value is written via the sticky gen9_write_dc_state().
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	/* Clamp (and warn about) states not allowed on this system. */
	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
525
/* Enter the DC9 power saving state on BXT, after sanity-checking preconditions. */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
534
/* Leave the DC9 state on BXT, after sanity-checking preconditions. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
543
/*
 * WARN if the CSR/DMC firmware doesn't appear to be loaded: program
 * storage, SSP base and HTP registers should all be non-zero.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
551
/*
 * Sanity-check (WARN, don't fail) the preconditions for entering DC5:
 * power well 2 off, DC5 not already requested, an RPM wakelock held and
 * the DMC firmware loaded.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
565
/* Enter the DC5 power saving state, after sanity-checking preconditions. */
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
574
/*
 * Sanity-check (WARN, don't fail) the preconditions for entering DC6:
 * backlight utility pin off, DC6 not already requested and the DMC
 * firmware loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
584
/* Enter the DC6 power saving state, after sanity-checking preconditions. */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

}
594
/* Leave the DC6 state by disabling all DC states. */
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
601
/*
 * Clear any non-driver (KVMR/BIOS/debug) request bits left set for
 * @power_well, so that dropping the driver's own request actually powers
 * the well down. Unexpected requests are reported; requests known to be
 * forced on by the DMC firmware (PW1 on SKL/BXT, misc IO on SKL) are only
 * logged at debug level.
 */
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum skl_disp_power_wells power_well_id = power_well->data;
	u32 val;
	u32 mask;

	mask = SKL_POWER_WELL_REQ(power_well_id);

	val = I915_READ(HSW_PWR_WELL_KVMR);
	if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
		      power_well->name))
		I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);

	val = I915_READ(HSW_PWR_WELL_BIOS);
	val |= I915_READ(HSW_PWR_WELL_DEBUG);

	if (!(val & mask))
		return;

	/*
	 * DMC is known to force on the request bits for power well 1 on SKL
	 * and BXT and the misc IO power well on SKL but we don't expect any
	 * other request bits to be set, so WARN for those.
	 */
	if (power_well_id == SKL_DISP_PW_1 ||
	    (IS_SKYLAKE(dev_priv) && power_well_id == SKL_DISP_PW_MISC_IO))
		DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
				 "by DMC\n", power_well->name);
	else
		WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
			  power_well->name);

	I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
	I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
639
640 static void skl_set_power_well(struct drm_i915_private *dev_priv,
641                         struct i915_power_well *power_well, bool enable)
642 {
643         uint32_t tmp, fuse_status;
644         uint32_t req_mask, state_mask;
645         bool is_enabled, enable_requested, check_fuse_status = false;
646
647         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
648         fuse_status = I915_READ(SKL_FUSE_STATUS);
649
650         switch (power_well->data) {
651         case SKL_DISP_PW_1:
652                 if (wait_for((I915_READ(SKL_FUSE_STATUS) &
653                         SKL_FUSE_PG0_DIST_STATUS), 1)) {
654                         DRM_ERROR("PG0 not enabled\n");
655                         return;
656                 }
657                 break;
658         case SKL_DISP_PW_2:
659                 if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
660                         DRM_ERROR("PG1 in disabled state\n");
661                         return;
662                 }
663                 break;
664         case SKL_DISP_PW_DDI_A_E:
665         case SKL_DISP_PW_DDI_B:
666         case SKL_DISP_PW_DDI_C:
667         case SKL_DISP_PW_DDI_D:
668         case SKL_DISP_PW_MISC_IO:
669                 break;
670         default:
671                 WARN(1, "Unknown power well %lu\n", power_well->data);
672                 return;
673         }
674
675         req_mask = SKL_POWER_WELL_REQ(power_well->data);
676         enable_requested = tmp & req_mask;
677         state_mask = SKL_POWER_WELL_STATE(power_well->data);
678         is_enabled = tmp & state_mask;
679
680         if (!enable && enable_requested)
681                 skl_power_well_pre_disable(dev_priv, power_well);
682
683         if (enable) {
684                 if (!enable_requested) {
685                         WARN((tmp & state_mask) &&
686                                 !I915_READ(HSW_PWR_WELL_BIOS),
687                                 "Invalid for power well status to be enabled, unless done by the BIOS, \
688                                 when request is to disable!\n");
689                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
690                 }
691
692                 if (!is_enabled) {
693                         DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
694                         check_fuse_status = true;
695                 }
696         } else {
697                 if (enable_requested) {
698                         I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
699                         POSTING_READ(HSW_PWR_WELL_DRIVER);
700                         DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
701                 }
702
703                 if (IS_GEN9(dev_priv))
704                         gen9_sanitize_power_well_requests(dev_priv, power_well);
705         }
706
707         if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
708                      1))
709                 DRM_ERROR("%s %s timeout\n",
710                           power_well->name, enable ? "enable" : "disable");
711
712         if (check_fuse_status) {
713                 if (power_well->data == SKL_DISP_PW_1) {
714                         if (wait_for((I915_READ(SKL_FUSE_STATUS) &
715                                 SKL_FUSE_PG1_DIST_STATUS), 1))
716                                 DRM_ERROR("PG1 distributing status timeout\n");
717                 } else if (power_well->data == SKL_DISP_PW_2) {
718                         if (wait_for((I915_READ(SKL_FUSE_STATUS) &
719                                 SKL_FUSE_PG2_DIST_STATUS), 1))
720                                 DRM_ERROR("PG2 distributing status timeout\n");
721                 }
722         }
723
724         if (enable && !is_enabled)
725                 skl_power_well_post_enable(dev_priv, power_well);
726 }
727
/*
 * Sync the HSW power well's hardware state with our software refcount at
 * takeover time, then clear any BIOS request since the driver is in charge
 * from now on.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
740
/* ->enable hook for the HSW/BDW display power well. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}
746
/* ->disable hook for the HSW/BDW display power well. */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}
752
/*
 * A SKL power well counts as enabled only when both its driver request
 * bit and its hardware state bit are set in HSW_PWR_WELL_DRIVER.
 */
static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}
761
/*
 * Sync software refcount state to the hardware for a SKL power well and
 * clear any BIOS-made request, as the driver owns the wells from now on.
 */
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
770
/* Power well ops .enable hook for SKL: request the well on. */
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}
776
/* Power well ops .disable hook for SKL: request the well off. */
static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}
782
/*
 * The "DC off" well is considered enabled when no DC5/DC6 state is
 * currently allowed in DC_STATE_EN.
 */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
788
/*
 * Enabling the "DC off" well means disallowing DC states altogether.
 * On BXT also cross-check the cdclk and DDI PHY state against what the
 * driver believes, now that the hardware is guaranteed awake.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (IS_BROXTON(dev_priv)) {
		broxton_cdclk_verify_state(dev_priv);
		broxton_ddi_phy_verify_state(dev_priv);
	}
}
799
/*
 * Disabling the "DC off" well re-allows the deepest DC state permitted
 * by the allowed_dc_mask. Without a loaded DMC firmware payload DC
 * states cannot be used, so bail out in that case.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
811
/* Apply the refcounted "DC off" state to the hardware. */
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		gen9_dc_off_power_well_enable(dev_priv, power_well);
	else
		gen9_dc_off_power_well_disable(dev_priv, power_well);
}
820
/* No-op hook for the always-on well: nothing to program in hardware. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
825
/* The always-on well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
831
/*
 * Set a VLV/CHV power well on or off through the PUNIT sideband.
 * Skips the PUNIT write when the status already matches, otherwise
 * updates the control register and polls for the status to follow.
 * Must not be called with rps.hw_lock held (taken here).
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the PUNIT status for this well matches the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
867
/* Apply the refcounted VLV power well state to the hardware. */
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
873
/* Power well ops .enable hook for VLV: request the well on. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
879
/* Power well ops .disable hook for VLV: request the well off. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
885
/*
 * Query a VLV/CHV power well state from the PUNIT. Warns if the status
 * is neither fully on nor fully gated, or if control and status
 * disagree (either would mean someone else is poking at the wells).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
921
/*
 * Program the VLV/CHV display clock gating defaults: disable the VR
 * housing unit clock gate, disable trickle feed and enable pnd deadline
 * calculation.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);
}
932
933 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
934 {
935         enum pipe pipe;
936
937         /*
938          * Enable the CRI clock source so we can get at the
939          * display and the reference clock for VGA
940          * hotplug / manual detection. Supposedly DSI also
941          * needs the ref clock up and running.
942          *
943          * CHV DPLL B/C have some issues if VGA mode is enabled.
944          */
945         for_each_pipe(dev_priv->dev, pipe) {
946                 u32 val = I915_READ(DPLL(pipe));
947
948                 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
949                 if (pipe != PIPE_A)
950                         val |= DPLL_INTEGRATED_CRI_CLK_VLV;
951
952                 I915_WRITE(DPLL(pipe), val);
953         }
954
955         vlv_init_display_clock_gating(dev_priv);
956
957         spin_lock_irq(&dev_priv->irq_lock);
958         valleyview_enable_display_irqs(dev_priv);
959         spin_unlock_irq(&dev_priv->irq_lock);
960
961         /*
962          * During driver initialization/resume we can avoid restoring the
963          * part of the HW/SW state that will be inited anyway explicitly.
964          */
965         if (dev_priv->power_domains.initializing)
966                 return;
967
968         intel_hpd_init(dev_priv);
969
970         i915_redisable_vga_power_on(dev_priv->dev);
971 }
972
/*
 * Tear down display-side state before the VLV/CHV display power well
 * loses power: disable and quiesce display irqs and reset the panel
 * power sequencer bookkeeping.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->dev->irq);

	vlv_power_sequencer_reset(dev_priv);
}
984
/* Enable the VLV display power well, then re-init dependent display state. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
994
/* Tear down dependent display state, then gate the VLV display well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1004
/*
 * Enable the VLV DPIO common lane power well and de-assert the common
 * lane reset afterwards, per the VLV DPIO programming notes.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b. The other bits such as sfr settings / modesel may all
	 *      be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1028
/*
 * Disable the VLV DPIO common lane power well. All pipe PLLs must be
 * disabled first; the common lane reset is asserted before gating.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1044
1045 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
1046
1047 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1048                                                  int power_well_id)
1049 {
1050         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1051         int i;
1052
1053         for (i = 0; i < power_domains->power_well_count; i++) {
1054                 struct i915_power_well *power_well;
1055
1056                 power_well = &power_domains->power_wells[i];
1057                 if (power_well->data == power_well_id)
1058                         return power_well;
1059         }
1060
1061         return NULL;
1062 }
1063
1064 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1065
/*
 * Sanity check the CHV PHY power state: reconstruct the expected
 * DISPLAY_PHY_STATUS value from chv_phy_control and the common lane
 * power wells, then compare it with what the hardware reports.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* 0x3 = lanes 0-1, 0xc = lanes 2-3 for each channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}
1168
1169 #undef BITS_SET
1170
/*
 * Enable a CHV DPIO common lane power well (PHY0 for ports B/C, PHY1
 * for port D), wait for power good, enable dynamic power down in the
 * PHY, then de-assert the common lane reset via DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* pipe here only selects which sideband port to talk through */
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1230
/*
 * Disable a CHV DPIO common lane power well: verify the PLLs it feeds
 * are off, assert the common lane reset and gate the well. Afterwards
 * the PHY state asserts can be re-armed since the PHY is fully reset.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1261
/*
 * Verify the per-lane power down status bits in the PHY common lane
 * registers match what the override settings in @mask imply for the
 * given channel.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* some lanes enabled: not all down, but at least one is */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1323
/*
 * Set or clear the lane power down override enable for a whole PHY
 * channel. Returns the previous override-enable state so the caller
 * can restore it later.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1354
/*
 * Program the per-lane power down override mask for the encoder's PHY
 * channel and enable/disable the override, then verify both the PHY
 * status and the resulting lane power down state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* replace the old lane mask for this channel with the new one */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1384
/*
 * Query a CHV per-pipe power well state from the PUNIT DSPFREQ
 * register. Warns on transient or unexpected states, which would mean
 * someone else is manipulating the pipe power controls.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
1413
/*
 * Set a CHV per-pipe power well on or off through the PUNIT DSPFREQ
 * register. Skips the write when the status already matches, otherwise
 * updates the control bits and polls for the status to follow.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

/* True once the PUNIT pipe status matches the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
1447
/* Apply the refcounted pipe-A power well state to the hardware. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
1455
/* Enable the pipe-A power well, then re-init dependent display state. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1465
/* Tear down dependent display state, then gate the pipe-A power well. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1475
/*
 * Grab a reference on @domain: bump the refcount of every power well
 * that feeds the domain, enabling any well going from 0 to 1, and
 * record the per-domain use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;
}
1491
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Also takes a runtime pm reference, so the device can't runtime suspend
 * while the domain reference is held.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}
1517
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the
 * domain (and the device) is already powered on; unlike
 * intel_display_power_get() it will not power anything up.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns: true if the reference was taken (the domain was enabled),
 * false otherwise (no reference is held in that case).
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	/* bail if the device isn't runtime-active; avoids waking it up */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	/* drop the runtime pm reference we speculatively took above */
	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}
1555
1556 /**
1557  * intel_display_power_put - release a power domain reference
1558  * @dev_priv: i915 device instance
1559  * @domain: power domain to reference
1560  *
1561  * This function drops the power domain reference obtained by
1562  * intel_display_power_get() and might power down the corresponding hardware
1563  * block right away if this is the last reference.
1564  */
1565 void intel_display_power_put(struct drm_i915_private *dev_priv,
1566                              enum intel_display_power_domain domain)
1567 {
1568         struct i915_power_domains *power_domains;
1569         struct i915_power_well *power_well;
1570         int i;
1571
1572         power_domains = &dev_priv->power_domains;
1573
1574         mutex_lock(&power_domains->lock);
1575
1576         WARN(!power_domains->domain_use_count[domain],
1577              "Use count on domain %s is already zero\n",
1578              intel_display_power_domain_str(domain));
1579         power_domains->domain_use_count[domain]--;
1580
1581         for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
1582                 WARN(!power_well->count,
1583                      "Use count on power well %s is already zero",
1584                      power_well->name);
1585
1586                 if (!--power_well->count)
1587                         intel_power_well_disable(dev_priv, power_well);
1588         }
1589
1590         mutex_unlock(&power_domains->lock);
1591
1592         intel_runtime_pm_put(dev_priv);
1593 }
1594
/*
 * HSW: all domains gated by the single SW-controlled "display" power well.
 * Domains not listed here (pipe A, transcoder eDP, ...) are only covered by
 * the always-on well.
 */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

/*
 * BDW: identical to HSW except the pipe A panel fitter is not part of the
 * display well.
 */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_INIT))

/* VLV: domains covered by the punit DISP2D power well. */
#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

/* VLV: domains needing the DPIO common lane well for ports B/C (and CRT). */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* VLV: per TX lane-pair well domains (lanes 0-1 / 2-3 of ports B and C). */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

/* CHV: domains covered by the pipe A ("disp2d") well, see chv_power_wells. */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
	BIT(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DSI) |		\
	BIT(POWER_DOMAIN_VGA) |			\
	BIT(POWER_DOMAIN_AUDIO) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_GMBUS) |		\
	BIT(POWER_DOMAIN_INIT))

/* CHV: DPIO common lane wells for ports B/C and port D respectively. */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))
1707
/* Always-on wells have no hardware control: every hook is a no-op. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV per-pipe power wells (only pipe A is actually used, see chv_power_wells). */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane wells; sync_hw/is_enabled are shared with VLV. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Fallback table for platforms without SW-controlled display power wells. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

/* HSW/BDW single "display" power well. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* SKL-style wells, also used by the BXT table and on KBL. */
static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

/* Gen9 "DC off" well: blocks DC state entry while the well is enabled. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};
1758
/* HSW: an always-on well plus one SW-controlled "display" well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

/* BDW: same layout as HSW, only the display domain mask differs. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
1786
/* VLV DISP2D well: punit controlled, with display-specific enable/disable. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane well: needs extra PHY setup around enable/disable. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Plain punit-controlled well (used for the DPIO TX lane wells). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
1807
/*
 * VLV power well table. Order matters: wells are enabled in array order and
 * disabled in reverse (see intel_power_domains_init()).
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	/*
	 * NOTE(review): every TX lane well below claims all four B/C lane
	 * domain sets, presumably because the lane groups must be powered
	 * together -- confirm against the punit documentation.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
1865
/* CHV power well table; enabled in array order, disabled in reverse. */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	/* DPIO common lane well for ports B/C. */
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	/* DPIO common lane well for port D. */
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
1897
1898 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
1899                                     int power_well_id)
1900 {
1901         struct i915_power_well *power_well;
1902         bool ret;
1903
1904         power_well = lookup_power_well(dev_priv, power_well_id);
1905         ret = power_well->ops->is_enabled(dev_priv, power_well);
1906
1907         return ret;
1908 }
1909
/*
 * SKL/KBL power well table; enabled in array order, disabled in reverse.
 * Wells with .domains == 0 are never requested via power domains; they are
 * driven from the display core init/uninit sequences and the DMC.
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};
1969
/* BXT power well table; reuses the SKL well ops and IDs. */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		/* .domains == 0: presumably DMC-managed like SKL PW1 -- confirm */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
};
1996
/*
 * Normalize the i915.disable_power_well module parameter to 0 or 1.
 * A negative value means "use the platform default", which is enabled (1);
 * any non-negative value is collapsed to a boolean.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well < 0)
		return 1;

	return !!disable_power_well;
}
2006
2007 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2008                                     int enable_dc)
2009 {
2010         uint32_t mask;
2011         int requested_dc;
2012         int max_dc;
2013
2014         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2015                 max_dc = 2;
2016                 mask = 0;
2017         } else if (IS_BROXTON(dev_priv)) {
2018                 max_dc = 1;
2019                 /*
2020                  * DC9 has a separate HW flow from the rest of the DC states,
2021                  * not depending on the DMC firmware. It's needed by system
2022                  * suspend/resume, so allow it unconditionally.
2023                  */
2024                 mask = DC_STATE_EN_DC9;
2025         } else {
2026                 max_dc = 0;
2027                 mask = 0;
2028         }
2029
2030         if (!i915.disable_power_well)
2031                 max_dc = 0;
2032
2033         if (enable_dc >= 0 && enable_dc <= max_dc) {
2034                 requested_dc = enable_dc;
2035         } else if (enable_dc == -1) {
2036                 requested_dc = max_dc;
2037         } else if (enable_dc > max_dc && enable_dc <= 2) {
2038                 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2039                               enable_dc, max_dc);
2040                 requested_dc = max_dc;
2041         } else {
2042                 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2043                 requested_dc = max_dc;
2044         }
2045
2046         if (requested_dc > 1)
2047                 mask |= DC_STATE_EN_UPTO_DC6;
2048         if (requested_dc > 0)
2049                 mask |= DC_STATE_EN_UPTO_DC5;
2050
2051         DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2052
2053         return mask;
2054 }
2055
/*
 * Point @power_domains at a static well table and record its size.
 * Must be passed a true array (ARRAY_SIZE), not a pointer.
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
2060
2061 /**
2062  * intel_power_domains_init - initializes the power domain structures
2063  * @dev_priv: i915 device instance
2064  *
2065  * Initializes the power domain structures for @dev_priv depending upon the
2066  * supported platform.
2067  */
2068 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2069 {
2070         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2071
2072         i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
2073                                                      i915.disable_power_well);
2074         dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
2075                                                             i915.enable_dc);
2076
2077         BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
2078
2079         mutex_init(&power_domains->lock);
2080
2081         /*
2082          * The enabling order will be from lower to higher indexed wells,
2083          * the disabling order is reversed.
2084          */
2085         if (IS_HASWELL(dev_priv)) {
2086                 set_power_wells(power_domains, hsw_power_wells);
2087         } else if (IS_BROADWELL(dev_priv)) {
2088                 set_power_wells(power_domains, bdw_power_wells);
2089         } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
2090                 set_power_wells(power_domains, skl_power_wells);
2091         } else if (IS_BROXTON(dev_priv)) {
2092                 set_power_wells(power_domains, bxt_power_wells);
2093         } else if (IS_CHERRYVIEW(dev_priv)) {
2094                 set_power_wells(power_domains, chv_power_wells);
2095         } else if (IS_VALLEYVIEW(dev_priv)) {
2096                 set_power_wells(power_domains, vlv_power_wells);
2097         } else {
2098                 set_power_wells(power_domains, i9xx_always_on_power_well);
2099         }
2100
2101         return 0;
2102 }
2103
2104 /**
2105  * intel_power_domains_fini - finalizes the power domain structures
2106  * @dev_priv: i915 device instance
2107  *
2108  * Finalizes the power domain structures for @dev_priv depending upon the
2109  * supported platform. This function also disables runtime pm and ensures that
2110  * the device stays powered up so that the driver can be reloaded.
2111  */
2112 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2113 {
2114         struct device *device = &dev_priv->dev->pdev->dev;
2115
2116         /*
2117          * The i915.ko module is still not prepared to be loaded when
2118          * the power well is not enabled, so just enable it in case
2119          * we're going to unload/reload.
2120          * The following also reacquires the RPM reference the core passed
2121          * to the driver during loading, which is dropped in
2122          * intel_runtime_pm_enable(). We have to hand back the control of the
2123          * device to the core with this reference held.
2124          */
2125         intel_display_set_init_power(dev_priv, true);
2126
2127         /* Remove the refcount we took to keep power well support disabled. */
2128         if (!i915.disable_power_well)
2129                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2130
2131         /*
2132          * Remove the refcount we took in intel_runtime_pm_enable() in case
2133          * the platform doesn't support runtime PM.
2134          */
2135         if (!HAS_RUNTIME_PM(dev_priv))
2136                 pm_runtime_put(device);
2137 }
2138
2139 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2140 {
2141         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2142         struct i915_power_well *power_well;
2143         int i;
2144
2145         mutex_lock(&power_domains->lock);
2146         for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
2147                 power_well->ops->sync_hw(dev_priv, power_well);
2148                 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2149                                                                      power_well);
2150         }
2151         mutex_unlock(&power_domains->lock);
2152 }
2153
/*
 * SKL display core bring-up: disable DC states, enable the PCH reset
 * handshake and power wells PG1 + Misc I/O. On resume additionally
 * restore cdclk and reload the DMC firmware.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	/* The remaining steps are only needed when resuming. */
	if (!resume)
		return;

	skl_init_cdclk(dev_priv);

	if (dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2186
/*
 * SKL display core teardown: the reverse of skl_display_core_init() --
 * disable DC states, cdclk, then the Misc I/O and PG1 wells.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
2209
/*
 * BXT display core bring-up: disable DC states, clear the PCH reset
 * handshake (no PCH on BXT), enable PG1, then initialize cdclk and the
 * DDI PHYs, verifying both. On resume also reload the DMC firmware.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	broxton_init_cdclk(dev_priv);
	broxton_ddi_phy_init(dev_priv);

	broxton_cdclk_verify_state(dev_priv);
	broxton_ddi_phy_verify_state(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
2246
/*
 * BXT display core teardown: the reverse of bxt_display_core_init() --
 * disable DC states, the DDI PHYs and cdclk, then the PG1 well.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	broxton_ddi_phy_uninit(dev_priv);
	broxton_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* Disable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);
}
2267
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL for CHV at init.
 *
 * The register itself must never be read back (see workaround note
 * below), so the expected value is rebuilt from the common lane power
 * well states and the current lane status, and cached in
 * dev_priv->chv_phy_control before being written out once.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		/* PHY0 lane status is read out of the pipe A DPLL register */
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		/* Port C ready bits sit 4 bits above the port B ones */
		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		/*
		 * NOTE(review): assert flag set when the well is off at init;
		 * presumably it gates PHY status asserts until the PHY is
		 * brought up through the normal path — confirm against the
		 * chv_phy_assert consumers.
		 */
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		/* PHY1 (port D) lane status comes from DPIO_PHY_STATUS */
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	/* Write the reconstructed value out; this is the only write path. */
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
2354
/*
 * vlv_cmnlane_wa - toggle the display PHY side reset on VLV
 *
 * Workaround: the PHY side reset must be asserted and de-asserted by
 * power-gating and then un-gating the common lane well, otherwise the
 * PHY does not come up well enough for ports and lanes to work (see
 * the docx reference below). Skipped if the display appears to be
 * already active, since toggling the reset would disturb it.
 *
 * Note the wells are deliberately left with disp2d on and cmn off;
 * presumably cmn is powered back up (completing the reset toggle) when
 * display power domains are grabbed right after this in
 * intel_power_domains_init_hw() — confirm against the power well code.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
2382
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: true when called as part of system resume; forwarded to the
 *          SKL/KBL and BXT display core init sequences
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* Platform-specific display core / PHY init and workarounds. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_BROXTON(dev)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/*
	 * Hold a permanent INIT power domain reference unless the user
	 * explicitly disabled power well support; the matching put is in
	 * intel_power_domains_suspend().
	 */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}
2419
2420 /**
2421  * intel_power_domains_suspend - suspend power domain state
2422  * @dev_priv: i915 device instance
2423  *
2424  * This function prepares the hardware power domain state before entering
2425  * system suspend. It must be paired with intel_power_domains_init_hw().
2426  */
2427 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2428 {
2429         /*
2430          * Even if power well support was disabled we still want to disable
2431          * power wells while we are system suspended.
2432          */
2433         if (!i915.disable_power_well)
2434                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2435
2436         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2437                 skl_display_core_uninit(dev_priv);
2438         else if (IS_BROXTON(dev_priv))
2439                 bxt_display_core_uninit(dev_priv);
2440 }
2441
2442 /**
2443  * intel_runtime_pm_get - grab a runtime pm reference
2444  * @dev_priv: i915 device instance
2445  *
2446  * This function grabs a device-level runtime pm reference (mostly used for GEM
2447  * code to ensure the GTT or GT is on) and ensures that it is powered up.
2448  *
2449  * Any runtime pm reference obtained by this function must have a symmetric
2450  * call to intel_runtime_pm_put() to release the reference again.
2451  */
2452 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2453 {
2454         struct drm_device *dev = dev_priv->dev;
2455         struct device *device = &dev->pdev->dev;
2456
2457         pm_runtime_get_sync(device);
2458
2459         atomic_inc(&dev_priv->pm.wakeref_count);
2460         assert_rpm_wakelock_held(dev_priv);
2461 }
2462
2463 /**
2464  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2465  * @dev_priv: i915 device instance
2466  *
2467  * This function grabs a device-level runtime pm reference if the device is
2468  * already in use and ensures that it is powered up.
2469  *
2470  * Any runtime pm reference obtained by this function must have a symmetric
2471  * call to intel_runtime_pm_put() to release the reference again.
2472  */
2473 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2474 {
2475         struct drm_device *dev = dev_priv->dev;
2476         struct device *device = &dev->pdev->dev;
2477
2478         if (IS_ENABLED(CONFIG_PM)) {
2479                 int ret = pm_runtime_get_if_in_use(device);
2480
2481                 /*
2482                  * In cases runtime PM is disabled by the RPM core and we get
2483                  * an -EINVAL return value we are not supposed to call this
2484                  * function, since the power state is undefined. This applies
2485                  * atm to the late/early system suspend/resume handlers.
2486                  */
2487                 WARN_ON_ONCE(ret < 0);
2488                 if (ret <= 0)
2489                         return false;
2490         }
2491
2492         atomic_inc(&dev_priv->pm.wakeref_count);
2493         assert_rpm_wakelock_held(dev_priv);
2494
2495         return true;
2496 }
2497
2498 /**
2499  * intel_runtime_pm_get_noresume - grab a runtime pm reference
2500  * @dev_priv: i915 device instance
2501  *
2502  * This function grabs a device-level runtime pm reference (mostly used for GEM
2503  * code to ensure the GTT or GT is on).
2504  *
2505  * It will _not_ power up the device but instead only check that it's powered
2506  * on.  Therefore it is only valid to call this functions from contexts where
2507  * the device is known to be powered up and where trying to power it up would
2508  * result in hilarity and deadlocks. That pretty much means only the system
2509  * suspend/resume code where this is used to grab runtime pm references for
2510  * delayed setup down in work items.
2511  *
2512  * Any runtime pm reference obtained by this function must have a symmetric
2513  * call to intel_runtime_pm_put() to release the reference again.
2514  */
2515 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2516 {
2517         struct drm_device *dev = dev_priv->dev;
2518         struct device *device = &dev->pdev->dev;
2519
2520         assert_rpm_wakelock_held(dev_priv);
2521         pm_runtime_get_noresume(device);
2522
2523         atomic_inc(&dev_priv->pm.wakeref_count);
2524 }
2525
2526 /**
2527  * intel_runtime_pm_put - release a runtime pm reference
2528  * @dev_priv: i915 device instance
2529  *
2530  * This function drops the device-level runtime pm reference obtained by
2531  * intel_runtime_pm_get() and might power down the corresponding
2532  * hardware block right away if this is the last reference.
2533  */
2534 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2535 {
2536         struct drm_device *dev = dev_priv->dev;
2537         struct device *device = &dev->pdev->dev;
2538
2539         assert_rpm_wakelock_held(dev_priv);
2540         if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
2541                 atomic_inc(&dev_priv->pm.atomic_seq);
2542
2543         pm_runtime_mark_last_busy(device);
2544         pm_runtime_put_autosuspend(device);
2545 }
2546
2547 /**
2548  * intel_runtime_pm_enable - enable runtime pm
2549  * @dev_priv: i915 device instance
2550  *
2551  * This function enables runtime pm at the end of the driver load sequence.
2552  *
2553  * Note that this function does currently not enable runtime pm for the
2554  * subordinate display power domains. That is only done on the first modeset
2555  * using intel_display_set_init_power().
2556  */
2557 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2558 {
2559         struct drm_device *dev = dev_priv->dev;
2560         struct device *device = &dev->pdev->dev;
2561
2562         pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
2563         pm_runtime_mark_last_busy(device);
2564
2565         /*
2566          * Take a permanent reference to disable the RPM functionality and drop
2567          * it only when unloading the driver. Use the low level get/put helpers,
2568          * so the driver's own RPM reference tracking asserts also work on
2569          * platforms without RPM support.
2570          */
2571         if (!HAS_RUNTIME_PM(dev)) {
2572                 pm_runtime_dont_use_autosuspend(device);
2573                 pm_runtime_get_sync(device);
2574         } else {
2575                 pm_runtime_use_autosuspend(device);
2576         }
2577
2578         /*
2579          * The core calls the driver load handler with an RPM reference held.
2580          * We drop that here and will reacquire it during unloading in
2581          * intel_power_domains_fini().
2582          */
2583         pm_runtime_put_autosuspend(device);
2584 }
2585