/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <drm/drm_crtc_helper.h>

static int i915_modeset __read_mostly = -1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
                "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
                "1=on, -1=force vga console preference [default])");

int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
                "Override lid status (0=autodetect, 1=autodetect disabled [default], "
                "-1=force lid closed, -2=force lid open)");

unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
                "Enable powersavings, fbc, downclocking, etc. (default: true)");

int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
                "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
                "Enable power-saving render C-state 6. "
                "Different stages can be selected via bitmask values "
                "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
                "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
                "default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
                "Enable frame buffer compression for power savings "
                "(default: -1 (use per-chip default))");

unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
                "Use panel (LVDS/eDP) downclocking for power savings "
                "(default: false)");

int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
                "Specify LVDS channel mode "
                "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");

int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
                "Use Spread Spectrum Clock with panels [LVDS/eDP] "
                "(default: auto from VBT)");

int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
                "Override/Ignore selection of SDVO panel mode in the VBT "
                "(-2=ignore, -1=auto [default], index in VBT BIOS table)");

static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");

bool i915_enable_hangcheck __read_mostly = true;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
                "Periodically check GPU activity for detecting hangs. "
                "WARNING: Disabling this can cause system wide hangs. "
                "(default: true)");

int i915_enable_ppgtt __read_mostly = -1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
MODULE_PARM_DESC(i915_enable_ppgtt,
                "Override PPGTT usage. "
                "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");

int i915_enable_psr __read_mostly = 0;
module_param_named(enable_psr, i915_enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");

unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
                "Enable preliminary hardware support.");

int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
                "Disable the power well when possible (default: true)");

int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");

bool i915_fastboot __read_mostly = false;
module_param_named(fastboot, i915_fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
                "(default: false)");

int i915_enable_pc8 __read_mostly = 1;
module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");

int i915_pc8_timeout __read_mostly = 5000;
module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");

bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
                "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");

static struct drm_driver driver;

static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
};

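/*
 * Feature flags shared by all gen7-based platforms (Ivy Bridge, Valley View
 * and Haswell).  The device info structs below may override any of these
 * fields; with designated initializers the last assignment wins.
 */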
#define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
        GEN7_FEATURES,
        .is_mobile = 1,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
        GEN7_FEATURES,
        .num_pipes = 2,
        .is_valleyview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};

static const struct intel_device_info intel_haswell_m_info = {
        GEN7_FEATURES,
        .is_haswell = 1,
        .is_mobile = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
};

static const struct intel_device_info intel_broadwell_d_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
};

static const struct intel_device_info intel_broadwell_m_info = {
        .gen = 8, .is_mobile = 1, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
};

/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
        INTEL_I830_IDS(&intel_i830_info),       \
        INTEL_I845G_IDS(&intel_845g_info),      \
        INTEL_I85X_IDS(&intel_i85x_info),       \
        INTEL_I865G_IDS(&intel_i865g_info),     \
        INTEL_I915G_IDS(&intel_i915g_info),     \
        INTEL_I915GM_IDS(&intel_i915gm_info),   \
        INTEL_I945G_IDS(&intel_i945g_info),     \
        INTEL_I945GM_IDS(&intel_i945gm_info),   \
        INTEL_I965G_IDS(&intel_i965g_info),     \
        INTEL_G33_IDS(&intel_g33_info),         \
        INTEL_I965GM_IDS(&intel_i965gm_info),   \
        INTEL_GM45_IDS(&intel_gm45_info),       \
        INTEL_G45_IDS(&intel_g45_info),         \
        INTEL_PINEVIEW_IDS(&intel_pineview_info),       \
        INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),   \
        INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),   \
        INTEL_SNB_D_IDS(&intel_sandybridge_d_info),     \
        INTEL_SNB_M_IDS(&intel_sandybridge_m_info),     \
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),       \
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),       \
        INTEL_HSW_D_IDS(&intel_haswell_d_info), \
        INTEL_HSW_M_IDS(&intel_haswell_m_info), \
        INTEL_VLV_M_IDS(&intel_valleyview_m_info),      \
        INTEL_VLV_D_IDS(&intel_valleyview_d_info),      \
        INTEL_BDW_M_IDS(&intel_broadwell_m_info),       \
        INTEL_BDW_D_IDS(&intel_broadwell_d_info)

static const struct pci_device_id pciidlist[] = {               /* aka */
        INTEL_PCI_IDS,
        {0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif

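/*
 * Detect which PCH (south bridge) is paired with the GPU and record its
 * type and device id in dev_priv.  Devices without a south display engine
 * (num_pipes == 0) are marked as PCH_NOP.
 */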
void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pch;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /*
         * We probe the ISA bridge instead of Dev31:Fun0 to make graphics
         * device passthrough easier for VMMs, which then only need to
         * expose the ISA bridge to let the driver know the real hardware
         * underneath. This is a requirement from the virtualization team.
         *
         * In some virtualized environments (e.g. XEN) there may be an
         * irrelevant ISA bridge in the system. To work reliably, we should
         * scan through all the ISA bridge devices and check for the first
         * match, instead of only checking the first one.
         */
        pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
        while (pch) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id;
                        id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(IS_ULT(dev));
                        } else if (IS_BROADWELL(dev)) {
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->pch_id =
                                        INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
                                DRM_DEBUG_KMS("This is Broadwell, assuming "
                                              "LynxPoint LP PCH\n");
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
                        } else {
                                goto check_next;
                        }
                        pci_dev_put(pch);
                        break;
                }
check_next:
                /* pci_get_class() drops the reference on the 'from' device
                 * for us, so no extra pci_dev_put() is needed here. */
                pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch);
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found?\n");
}

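/*
 * Report whether inter-ring semaphores should be used on this device:
 * never on gen < 6, currently off on gen8 pending further testing,
 * otherwise honour the module parameter, with a SNB exception when the
 * IOMMU has the graphics device mapped.
 */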
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                return false;

        /* Until we get further testing... */
        if (IS_GEN8(dev)) {
                WARN_ON(!i915_preliminary_hw_support);
                return false;
        }

        if (i915_semaphores >= 0)
                return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

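/*
 * Common suspend path: idle GEM, shut down the CRTCs (keeping the software
 * state around for thaw), save register and opregion state and suspend the
 * fbdev console.  Shared by system suspend, hibernation and poweroff.
 */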
static int i915_drm_freeze(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;

        intel_runtime_pm_get(dev_priv);

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        hsw_disable_package_c8(dev_priv);
        intel_display_set_init_power(dev, true);

        drm_kms_helper_poll_disable(dev);

        pci_save_state(dev->pdev);

        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                int error;

                error = i915_gem_suspend(dev);
                if (error) {
                        dev_err(&dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
                        return error;
                }

                cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

                drm_irq_uninstall(dev);
                dev_priv->enable_hotplug_processing = false;
                /*
                 * Disable CRTCs directly since we want to preserve sw state
                 * for _thaw.
                 */
                mutex_lock(&dev->mode_config.mutex);
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                        dev_priv->display.crtc_disable(crtc);
                mutex_unlock(&dev->mode_config.mutex);

                intel_modeset_suspend_hw(dev);
        }

        i915_gem_suspend_gtt_mappings(dev);

        i915_save_state(dev);

        intel_opregion_fini(dev);

        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
        console_unlock();

        return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
{
        int error;

        if (!dev || !dev->dev_private) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (state.event == PM_EVENT_PRETHAW)
                return 0;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_freeze(dev);
        if (error)
                return error;

        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }

        return 0;
}

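/*
 * Deferred fbdev resume: __i915_drm_thaw() schedules this worker when it
 * cannot take the console lock without blocking the resume path.
 */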
void intel_console_resume(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             console_resume_work);
        struct drm_device *dev = dev_priv->dev;

        console_lock();
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
        console_unlock();
}

static void intel_resume_hotplug(struct drm_device *dev)
{
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                if (encoder->hot_plug)
                        encoder->hot_plug(encoder);

        mutex_unlock(&mode_config->mutex);

        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}

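/*
 * Common resume path shared by thaw and resume: optionally restore the GTT
 * mappings, restore saved register state, re-initialize the hardware and,
 * for KMS, bring the display and hotplug handling back up.
 */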
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;

        intel_uncore_early_sanitize(dev);

        intel_uncore_sanitize(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET) &&
            restore_gtt_mappings) {
                mutex_lock(&dev->struct_mutex);
                i915_gem_restore_gtt_mappings(dev);
                mutex_unlock(&dev->struct_mutex);
        }

        intel_power_domains_init_hw(dev);

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);
                drm_mode_config_reset(dev);

                mutex_lock(&dev->struct_mutex);

                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);

                /* We need working interrupts for modeset enabling ... */
                drm_irq_install(dev);

                intel_modeset_init_hw(dev);

                drm_modeset_lock_all(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);

                /*
                 * ... but also need to make sure that hotplug processing
                 * doesn't cause havoc. Like in the driver load code we don't
                 * bother with the tiny race here where we might lose hotplug
                 * notifications.
                 */
                intel_hpd_init(dev);
                dev_priv->enable_hotplug_processing = true;
                /* Config may have changed between suspend and resume */
                intel_resume_hotplug(dev);
        }

        intel_opregion_init(dev);

        /*
         * The console lock can be pretty contended on resume due
         * to all the printk activity.  Try to keep it out of the hot
         * path of resume if possible.
         */
        if (console_trylock()) {
                intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
                console_unlock();
        } else {
                schedule_work(&dev_priv->console_resume_work);
        }

        /* Undo what we did at i915_drm_freeze so the refcount goes back to the
         * expected level. */
        hsw_enable_package_c8(dev_priv);

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_runtime_pm_put(dev_priv);
        return error;
}

static int i915_drm_thaw(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                i915_check_and_clear_faults(dev);

        return __i915_drm_thaw(dev, true);
}

int i915_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        if (pci_enable_device(dev->pdev))
                return -EIO;

        pci_set_master(dev->pdev);

        /*
         * Platforms with opregion should have sane BIOS, older ones (gen3 and
         * earlier) need to restore the GTT mappings since the BIOS might clear
         * all our scratch PTEs.
         */
        ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
        if (ret)
                return ret;

        drm_kms_helper_poll_enable(dev);
        return 0;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool simulated;
        int ret;

        if (!i915_try_reset)
                return 0;

        mutex_lock(&dev->struct_mutex);

        i915_gem_reset(dev);

        simulated = dev_priv->gpu_error.stop_rings != 0;

        ret = intel_gpu_reset(dev);

        /* Also reset the gpu hangman. */
        if (simulated) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                dev_priv->gpu_error.stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }

        if (ret) {
                DRM_ERROR("Failed to reset chip: %i\n", ret);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        /* Ok, now get things going again... */

        /*
         * Everything depends on having the GTT running, so we need to start
         * there.  Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->ums.mm_suspended) {
                dev_priv->ums.mm_suspended = 0;

                ret = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("Failed hw init on reset %d\n", ret);
                        return ret;
                }

                drm_irq_uninstall(dev);
                drm_irq_install(dev);
                intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }

        return 0;
}

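/*
 * PCI probe: reject preliminary hardware unless the user opted in, bind
 * only to PCI function 0 and hand the device over to the DRM core.
 */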
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;

        if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
                DRM_INFO("This hardware requires preliminary hardware support.\n"
                         "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
                return -ENODEV;
        }

        /* Only bind to function 0 of the device. Early generations
         * used function 1 as a placeholder for multi-head. This only
         * causes us confusion now, especially on systems where both
         * functions have the same PCI-ID!
         */
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;

        driver.driver_features &= ~(DRIVER_USE_AGP);

        return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int error;

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_freeze(drm_dev);
        if (error)
                return error;

        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}

static int i915_pm_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        return i915_drm_freeze(drm_dev);
}

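/*
 * Runtime PM callbacks: mark the device as runtime suspended/resumed and
 * notify the opregion so the firmware knows about the power transition.
 */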
static int i915_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;

        WARN_ON(!HAS_RUNTIME_PM(dev));

        DRM_DEBUG_KMS("Suspending device\n");

        i915_gem_release_all_mmaps(dev_priv);

        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
        dev_priv->pm.suspended = true;

        /*
         * current versions of firmware which depend on this opregion
         * notification have repurposed the D1 definition to mean
         * "runtime suspended" vs. what you would normally expect (D3)
         * to distinguish it from notifications that might be sent
         * via the suspend path.
         */
        intel_opregion_notify_adapter(dev, PCI_D1);

        return 0;
}

static int i915_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;

        WARN_ON(!HAS_RUNTIME_PM(dev));

        DRM_DEBUG_KMS("Resuming device\n");

        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;

        return 0;
}

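/*
 * Dispatch table for system sleep (suspend/hibernate) and runtime PM.  For
 * non-KMS (UMS) setups the legacy .suspend/.resume hooks in struct
 * drm_driver below are used instead.
 */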
static const struct dev_pm_ops i915_pm_ops = {
        .suspend = i915_pm_suspend,
        .resume = i915_pm_resume,
        .freeze = i915_pm_freeze,
        .thaw = i915_pm_thaw,
        .poweroff = i915_pm_poweroff,
        .restore = i915_pm_resume,
        .runtime_suspend = i915_runtime_suspend,
        .runtime_resume = i915_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
        .fault = i915_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
#ifdef CONFIG_COMPAT
        .compat_ioctl = i915_compat_ioctl,
#endif
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        /* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
         */
        .driver_features =
            DRIVER_USE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
            DRIVER_RENDER,
        .load = i915_driver_load,
        .unload = i915_driver_unload,
        .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .preclose = i915_driver_preclose,
        .postclose = i915_driver_postclose,

        /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
        .suspend = i915_suspend,
        .resume = i915_resume,

        .device_is_agp = i915_driver_device_is_agp,
        .master_create = i915_master_create,
        .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = i915_debugfs_init,
        .debugfs_cleanup = i915_debugfs_cleanup,
#endif
        .gem_free_object = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = i915_gem_prime_export,
        .gem_prime_import = i915_gem_prime_import,

        .dumb_create = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

static struct pci_driver i915_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
        .probe = i915_pci_probe,
        .remove = i915_pci_remove,
        .driver.pm = &i915_pm_ops,
};

static int __init i915_init(void)
{
        driver.num_ioctls = i915_max_ioctl;

        /*
         * If CONFIG_DRM_I915_KMS is set, default to KMS unless
         * explicitly disabled with the module parameter.
         *
         * Otherwise, just follow the parameter (defaulting to off).
         *
         * Allow optional vga_text_mode_force boot option to override
         * the default behavior.
         */
#if defined(CONFIG_DRM_I915_KMS)
        if (i915_modeset != 0)
                driver.driver_features |= DRIVER_MODESET;
#endif
        if (i915_modeset == 1)
                driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
        if (vgacon_text_force() && i915_modeset == -1)
                driver.driver_features &= ~DRIVER_MODESET;
#endif

        if (!(driver.driver_features & DRIVER_MODESET)) {
                driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
                /* Silently fail loading to not upset userspace. */
                return 0;
#endif
        }

        return drm_pci_init(&driver, &i915_pci_driver);
}

static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
        if (!(driver.driver_features & DRIVER_MODESET))
                return; /* Never loaded a driver. */
#endif

        drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");