/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          CHV_PIPE_C_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           CHV_TRANSCODER_C_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
                             CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

#define BDW_COLORS \
        .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
        .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
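
/*
 * These helper macros expand to C99 designated initializers, so a device
 * info definition composes them simply by listing them inside the struct
 * initializer. Roughly (illustrative only, intel_foo_info is not a real
 * platform):
 *
 *      static const struct intel_device_info intel_foo_info = {
 *              .gen = 8,
 *              GEN_DEFAULT_PIPEOFFSETS,
 *              IVB_CURSOR_OFFSETS,
 *              BDW_COLORS,
 *      };
 */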
static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        GEN_DEFAULT_PIPEOFFSETS,
};

#define GEN7_FEATURES \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        GEN_DEFAULT_PIPEOFFSETS, \
        IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
};
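
/*
 * "Last one wins" relies on C99 designated-initializer semantics: when the
 * same member is initialized more than once, the last initialization takes
 * effect. So .num_pipes = 0 above deliberately overrides the .num_pipes = 3
 * that GEN7_FEATURES expands to. In miniature:
 *
 *      struct s { int a; };
 *      struct s v = { .a = 3, .a = 0 };        // v.a == 0, well defined
 */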
#define VLV_FEATURES \
        .gen = 7, .num_pipes = 2, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .display_mmio_offset = VLV_DISPLAY_BASE, \
        GEN_DEFAULT_PIPEOFFSETS, \
        CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_m_info = {
        VLV_FEATURES,
        .is_valleyview = 1,
        .is_mobile = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
        VLV_FEATURES,
        .is_valleyview = 1,
};

#define HSW_FEATURES \
        GEN7_FEATURES, \
        .is_haswell = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING

static const struct intel_device_info intel_haswell_d_info = {
        HSW_FEATURES,
};

static const struct intel_device_info intel_haswell_m_info = {
        HSW_FEATURES,
        .is_mobile = 1,
};

#define BDW_FEATURES \
        HSW_FEATURES, \
        BDW_COLORS

static const struct intel_device_info intel_broadwell_d_info = {
        BDW_FEATURES,
        .gen = 8,
};

static const struct intel_device_info intel_broadwell_m_info = {
        BDW_FEATURES,
        .gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
        BDW_FEATURES,
        .gen = 8,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
        BDW_FEATURES,
        .gen = 8, .is_mobile = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .is_cherryview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
        CHV_COLORS,
};

static const struct intel_device_info intel_skylake_info = {
        BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
        BDW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
        .is_broxton = 1,
        .gen = 9,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .num_pipes = 3,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
        BDW_COLORS,
};

static const struct intel_device_info intel_kabylake_info = {
        BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
        BDW_FEATURES,
        .is_kabylake = 1,
        .gen = 9,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
/*
 * Make sure device matches here go from most specific to most general. For
 * example, since the Quanta match is based on the subsystem and subvendor
 * IDs, we need it to come before the more general IVB PCI ID matches,
 * otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
        INTEL_I830_IDS(&intel_i830_info),
        INTEL_I845G_IDS(&intel_845g_info),
        INTEL_I85X_IDS(&intel_i85x_info),
        INTEL_I865G_IDS(&intel_i865g_info),
        INTEL_I915G_IDS(&intel_i915g_info),
        INTEL_I915GM_IDS(&intel_i915gm_info),
        INTEL_I945G_IDS(&intel_i945g_info),
        INTEL_I945GM_IDS(&intel_i945gm_info),
        INTEL_I965G_IDS(&intel_i965g_info),
        INTEL_G33_IDS(&intel_g33_info),
        INTEL_I965GM_IDS(&intel_i965gm_info),
        INTEL_GM45_IDS(&intel_gm45_info),
        INTEL_G45_IDS(&intel_g45_info),
        INTEL_PINEVIEW_IDS(&intel_pineview_info),
        INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
        INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
        INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
        INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
        INTEL_HSW_D_IDS(&intel_haswell_d_info),
        INTEL_HSW_M_IDS(&intel_haswell_m_info),
        INTEL_VLV_M_IDS(&intel_valleyview_m_info),
        INTEL_VLV_D_IDS(&intel_valleyview_d_info),
        INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
        INTEL_CHV_IDS(&intel_cherryview_info),
        INTEL_SKL_GT1_IDS(&intel_skylake_info),
        INTEL_SKL_GT2_IDS(&intel_skylake_info),
        INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
        INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
        INTEL_BXT_IDS(&intel_broxton_info),
        INTEL_KBL_GT1_IDS(&intel_kabylake_info),
        INTEL_KBL_GT2_IDS(&intel_kabylake_info),
        INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
        INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
        {0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);
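
/*
 * Each INTEL_*_IDS() macro comes from <drm/i915_pciids.h> and expands to one
 * PCI match entry per device ID, roughly (illustrative, the gen2 i830M id):
 *
 *      INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 *
 * so pciidlist is a flat list of { vendor 0x8086, device id, driver_data }
 * entries that the PCI core matches against at probe time, with driver_data
 * pointing at the intel_device_info for that platform.
 */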
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
        enum intel_pch ret = PCH_NOP;

        /*
         * In a virtualized passthrough environment we can be in a
         * setup where the ISA bridge is not able to be passed through.
         * In this case, a south bridge can be emulated and we have to
         * make an educated guess as to which PCH is really there.
         */

        if (IS_GEN5(dev)) {
                ret = PCH_IBX;
                DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
        } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
                ret = PCH_CPT;
                DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                ret = PCH_LPT;
                DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
        } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
                ret = PCH_SPT;
                DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
        }

        return ret;
}
void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pch = NULL;

        /*
         * In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /*
         * The reason to probe the ISA bridge instead of Dev31:Fun0 is to make
         * graphics device passthrough work easily for the VMM, which then
         * only needs to expose the ISA bridge to let the driver know the real
         * hardware underneath. This is a requirement from the virtualization
         * team.
         *
         * In some virtualized environments (e.g. XEN), there may be an
         * irrelevant ISA bridge in the system. To work reliably, we should
         * scan through all the ISA bridge devices and check for the first
         * match, instead of only checking the first one.
         */
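        /*
         * pci_get_class() drops the reference on the device passed in and
         * takes a reference on the device it returns, so iterating this way
         * is refcount-safe as long as the last device returned is released
         * with pci_dev_put() once the loop is done.
         */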
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
                                WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
                                WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
                        } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
                                   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
                                    pch->subsystem_vendor ==
                                            PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
                                    pch->subsystem_device ==
                                            PCI_SUBDEVICE_ID_QEMU)) {
                                dev_priv->pch_type = intel_virt_detect_pch(dev);
                        } else {
                                continue;
                        }

                        break;
                }
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");

        pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) < 6)
                return false;

        if (i915.semaphores >= 0)
                return i915.semaphores;

        /* TODO: make semaphores and Execlists play nicely together */
        if (i915.enable_execlists)
                return false;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        drm_modeset_lock_all(dev);
        for_each_intel_encoder(dev, encoder)
                if (encoder->suspend)
                        encoder->suspend(encoder);
        drm_modeset_unlock_all(dev);
}
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                              bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
        if (acpi_target_system_state() < ACPI_STATE_S3)
                return true;
#endif
        return false;
}
static int i915_drm_suspend(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        pci_power_t opregion_target_state;
        int error;

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        disable_rpm_wakeref_asserts(dev_priv);
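
        /*
         * The suspend/resume paths in this file bracket their work with
         * disable_rpm_wakeref_asserts()/enable_rpm_wakeref_asserts():
         * runtime PM is in a transitional state while a system transition
         * runs, so the usual "caller holds an RPM wakeref" debug assertion
         * has to be suppressed until the path completes.
         */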
        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        intel_display_set_init_power(dev_priv, true);

        drm_kms_helper_poll_disable(dev);

        pci_save_state(dev->pdev);

        error = i915_gem_suspend(dev);
        if (error) {
                dev_err(&dev->pdev->dev,
                        "GEM idle failed, resume might fail\n");
                goto out;
        }

        intel_guc_suspend(dev);

        intel_suspend_gt_powersave(dev_priv);

        intel_display_suspend(dev);

        intel_dp_mst_suspend(dev);

        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_hpd_cancel_work(dev_priv);

        intel_suspend_encoders(dev_priv);

        intel_suspend_hw(dev);

        i915_gem_suspend_gtt_mappings(dev);

        i915_save_state(dev);

        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
        intel_opregion_notify_adapter(dev_priv, opregion_target_state);
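
        /*
         * For suspend-to-idle the ACPI firmware is told PCI_D1 rather than a
         * real low-power state: as with the runtime-suspend notification in
         * intel_runtime_suspend() below, shipping opregion firmware has
         * repurposed D1 to mean "idle, still powered", as opposed to the D3
         * states used for a full suspend.
         */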
        intel_uncore_forcewake_reset(dev_priv, false);
        intel_opregion_unregister(dev_priv);

        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

        dev_priv->suspend_count++;

        intel_display_set_init_power(dev_priv, false);

        intel_csr_ucode_suspend(dev_priv);

out:
        enable_rpm_wakeref_asserts(dev_priv);

        return error;
}
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        bool fw_csr;
        int ret = 0;

        disable_rpm_wakeref_asserts(dev_priv);

        fw_csr = !IS_BROXTON(dev_priv) &&
                suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
        /*
         * In case of firmware assisted context save/restore don't manually
         * deinit the power domains. This also means the CSR/DMC firmware will
         * stay active, it will power down any HW resources as required and
         * also enable deeper system power states that would be blocked if the
         * firmware was inactive.
         */
        if (!fw_csr)
                intel_power_domains_suspend(dev_priv);

        if (IS_BROXTON(dev_priv))
                bxt_enable_dc9(dev_priv);
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_enable_pc8(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                ret = vlv_suspend_complete(dev_priv);

        if (ret) {
                DRM_ERROR("Suspend complete failed: %d\n", ret);
                if (!fw_csr)
                        intel_power_domains_init_hw(dev_priv, true);

                goto out;
        }

        pci_disable_device(drm_dev->pdev);
        /*
         * During hibernation on some platforms the BIOS may try to access
         * the device even though it's already in D3 and hang the machine. So
         * leave the device in D0 on those platforms and hope the BIOS will
         * power down the device properly. The issue was seen on multiple old
         * GENs with different BIOS vendors, so having an explicit blacklist
         * is impractical; apply the workaround on everything pre GEN6. One
         * set of platforms where the issue was seen:
         * Lenovo Thinkpad X301, X61s, X60, T60, X41
         */
        if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
                pci_set_power_state(drm_dev->pdev, PCI_D3hot);

        dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}
int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
        int error;

        if (!dev || !dev->dev_private) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
                         state.event != PM_EVENT_FREEZE))
                return -EINVAL;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_suspend(dev);
        if (error)
                return error;

        return i915_drm_suspend_late(dev, false);
}
static int i915_drm_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        disable_rpm_wakeref_asserts(dev_priv);

        ret = i915_ggtt_enable_hw(dev);
        if (ret)
                DRM_ERROR("failed to re-enable GGTT\n");

        intel_csr_ucode_resume(dev_priv);

        mutex_lock(&dev->struct_mutex);
        i915_gem_restore_gtt_mappings(dev);
        mutex_unlock(&dev->struct_mutex);

        i915_restore_state(dev);
        intel_opregion_setup(dev_priv);

        intel_init_pch_refclk(dev);
        drm_mode_config_reset(dev);

        /*
         * Interrupts have to be enabled before any batches are run. If not the
         * GPU will hang. i915_gem_init_hw() will initiate batches to
         * update/restore the context.
         *
         * Modeset enabling in intel_modeset_init_hw() also needs working
         * interrupts.
         */
        intel_runtime_pm_enable_interrupts(dev_priv);

        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
                atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
        }
        mutex_unlock(&dev->struct_mutex);

        intel_guc_resume(dev);

        intel_modeset_init_hw(dev);

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_dp_mst_resume(dev);

        intel_display_resume(dev);

        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
         * bother with the tiny race here where we might lose hotplug
         * notifications.
         */
        intel_hpd_init(dev_priv);
        /* Config may have changed between suspend and resume */
        drm_helper_hpd_irq_event(dev);

        intel_opregion_register(dev_priv);

        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_opregion_notify_adapter(dev_priv, PCI_D0);

        drm_kms_helper_poll_enable(dev);

        enable_rpm_wakeref_asserts(dev_priv);

        return 0;
}
static int i915_drm_resume_early(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /*
         * We have a resume ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with an early
         * resume hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */

        /*
         * Note that we need to set the power state explicitly, since we
         * powered off the device during freeze and the PCI core won't power
         * it back up for us during thaw. Powering off the device during
         * freeze is not a hard requirement though, and during the
         * suspend/resume phases the PCI core makes sure we get here with the
         * device powered on. So in case we change our freeze logic and keep
         * the device powered we can also remove the following set power state
         * call.
         */
        ret = pci_set_power_state(dev->pdev, PCI_D0);
        if (ret) {
                DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
                goto out;
        }

        /*
         * Note that pci_enable_device() first enables any parent bridge
         * device and only then sets the power state for this device. The
         * bridge enabling is a nop though, since bridge devices are resumed
         * first. The order of enabling power and enabling the device is
         * imposed by the PCI core as described above, so here we preserve the
         * same order for the freeze/thaw phases.
         *
         * TODO: eventually we should remove pci_disable_device() /
         * pci_enable_device() from suspend/resume. Due to how they
         * depend on the device enable refcount we can't anyway depend on them
         * disabling/enabling the device.
         */
        if (pci_enable_device(dev->pdev)) {
                ret = -EIO;
                goto out;
        }

        pci_set_master(dev->pdev);

        disable_rpm_wakeref_asserts(dev_priv);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                ret = vlv_resume_prepare(dev_priv, false);
        if (ret)
                DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
                          ret);

        intel_uncore_early_sanitize(dev_priv, true);

        if (IS_BROXTON(dev_priv)) {
                if (!dev_priv->suspended_to_idle)
                        gen9_sanitize_dc_state(dev_priv);
                bxt_disable_dc9(dev_priv);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                hsw_disable_pc8(dev_priv);
        }

        intel_uncore_sanitize(dev_priv);

        if (IS_BROXTON(dev_priv) ||
            !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
                intel_power_domains_init_hw(dev_priv, true);

        enable_rpm_wakeref_asserts(dev_priv);

out:
        dev_priv->suspended_to_idle = false;

        return ret;
}
int i915_resume_switcheroo(struct drm_device *dev)
{
        int ret;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        ret = i915_drm_resume_early(dev);
        if (ret)
                return ret;

        return i915_drm_resume(dev);
}
/**
 * i915_reset - reset chip after a hang
 * @dev_priv: device private to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *  - reset the chip using the reset reg
 *  - re-init context state
 *  - re-init hardware status page
 *  - re-init ring buffer
 *  - re-init interrupt state
 */
int i915_reset(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct i915_gpu_error *error = &dev_priv->gpu_error;
        unsigned reset_counter;
        int ret;

        intel_reset_gt_powersave(dev_priv);

        mutex_lock(&dev->struct_mutex);

        /* Clear any previous failed attempts at recovery. Time to try again. */
        atomic_andnot(I915_WEDGED, &error->reset_counter);

        /* Clear the reset-in-progress flag and increment the reset epoch. */
        reset_counter = atomic_inc_return(&error->reset_counter);
        if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
                ret = -EIO;
                goto error;
        }

        ret = intel_gpu_reset(dev_priv, ALL_ENGINES);

        /* Also reset the gpu hangman. */
        if (error->stop_rings != 0) {
                DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
                error->stop_rings = 0;
                if (ret == -ENODEV) {
                        DRM_INFO("Reset not implemented, but ignoring "
                                 "error for simulated gpu hangs\n");
                        ret = 0;
                }
        }

        if (i915_stop_ring_allow_warn(dev_priv))
                pr_notice("drm/i915: Resetting chip after gpu hang\n");

        if (ret) {
                if (ret != -ENODEV)
                        DRM_ERROR("Failed to reset chip: %i\n", ret);
                else
                        DRM_DEBUG_DRIVER("GPU reset disabled\n");
                goto error;
        }

        intel_overlay_reset(dev_priv);

        /* Ok, now get things going again... */

        /*
         * Everything depends on having the GTT running, so we need to start
         * there. Fortunately we don't need to do this unless we reset the
         * chip at a PCI level.
         *
         * Next we need to restore the context, but we don't use those
         * yet either...
         *
         * Ring buffer needs to be re-initialized in the KMS case, or if X
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
        ret = i915_gem_init_hw(dev);
        if (ret) {
                DRM_ERROR("Failed hw init on reset %d\n", ret);
                goto error;
        }

        mutex_unlock(&dev->struct_mutex);

        /*
         * rps/rc6 re-init is necessary to restore state lost after the
         * reset and the re-install of gt irqs. Skip for ironlake per
         * previous concerns that it doesn't respond well to some forms
         * of re-init after reset.
         */
        if (INTEL_INFO(dev)->gen > 5)
                intel_enable_gt_powersave(dev_priv);

        return 0;

error:
        atomic_or(I915_WEDGED, &error->reset_counter);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;

        if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
                DRM_INFO("This hardware requires preliminary hardware support.\n"
                         "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
                return -ENODEV;
        }

        /* Only bind to function 0 of the device. Early generations
         * used function 1 as a placeholder for multi-head. This causes
         * us confusion instead, especially on the systems where both
         * functions have the same PCI-ID!
         */
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;

        if (vga_switcheroo_client_probe_defer(pdev))
                return -EPROBE_DEFER;
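
        /*
         * vga_switcheroo may not have a usable handler registered yet (for
         * example the apple-gmux driver on dual-GPU Macs loads separately),
         * in which case the -EPROBE_DEFER above lets the driver core retry
         * this probe once a handler has shown up.
         */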
        return i915_driver_load(pdev, ent, &driver);
}
static void
i915_pci_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        i915_driver_unload(dev);
}
static int i915_pm_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);

        if (!drm_dev || !drm_dev->dev_private) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_suspend(drm_dev);
}
static int i915_pm_suspend_late(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        /*
         * We have a suspend ordering issue with the snd-hda driver also
         * requiring our device to be powered up. Due to the lack of a
         * parent/child relationship we currently solve this with a late
         * suspend hook.
         *
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_suspend_late(drm_dev, false);
}
static int i915_pm_poweroff_late(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
        struct drm_device *drm_dev = dev_to_i915(dev)->dev;

        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        return i915_drm_resume(drm_dev);
}
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *dev)
{
        return i915_pm_suspend(dev);
}

static int i915_pm_freeze_late(struct device *dev)
{
        int ret;

        ret = i915_pm_suspend_late(dev);
        if (ret)
                return ret;

        ret = i915_gem_freeze_late(dev_to_i915(dev));
        if (ret)
                return ret;

        return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *dev)
{
        return i915_pm_resume_early(dev);
}

static int i915_pm_thaw(struct device *dev)
{
        return i915_pm_resume(dev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *dev)
{
        return i915_pm_resume_early(dev);
}

static int i915_pm_restore(struct device *dev)
{
        return i915_pm_resume(dev);
}
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
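
/*
 * The save and restore routines below must stay symmetric: every register
 * captured in vlv_save_gunit_s0ix_state() has a matching write in
 * vlv_restore_gunit_s0ix_state(), grouped by the same hardware unit and
 * MMIO range, so a missing counterpart is easy to spot in review.
 */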
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        int i;

        /* GAM 0x4000-0x4770 */
        s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
        s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
        s->arb_mode = I915_READ(ARB_MODE);
        s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
        s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

        s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
        s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

        s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
        s->ecochk = I915_READ(GAM_ECOCHK);
        s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
        s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

        s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

        /* MBC 0x9024-0x91D0, 0x8500 */
        s->g3dctl = I915_READ(VLV_G3DCTL);
        s->gsckgctl = I915_READ(VLV_GSCKGCTL);
        s->mbctl = I915_READ(GEN6_MBCTL);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
        s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
        s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
        s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
        s->rstctl = I915_READ(GEN6_RSTCTL);
        s->misccpctl = I915_READ(GEN7_MISCCPCTL);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        s->gfxpause = I915_READ(GEN6_GFXPAUSE);
        s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
        s->rpdeuc = I915_READ(GEN6_RPDEUC);
        s->ecobus = I915_READ(ECOBUS);
        s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
        s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
        s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
        s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
        s->rcedata = I915_READ(VLV_RCEDATA);
        s->spare2gh = I915_READ(VLV_SPAREG2H);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        s->gt_imr = I915_READ(GTIMR);
        s->gt_ier = I915_READ(GTIER);
        s->pm_imr = I915_READ(GEN6_PMIMR);
        s->pm_ier = I915_READ(GEN6_PMIER);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

        /* GT SA CZ domain, 0x100000-0x138124 */
        s->tilectl = I915_READ(TILECTL);
        s->gt_fifoctl = I915_READ(GTFIFOCTL);
        s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
        s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        s->pmwgicz = I915_READ(VLV_PMWGICZ);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
        s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
        s->pcbr = I915_READ(VLV_PCBR);
        s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

        /*
         * Not saving any of:
         * DFT, 0x9800-0x9EC0
         * SARB, 0xB000-0xB1FC
         * GAC, 0x5208-0x524C, 0x14000-0x14C000
         */
}
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
        struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
        u32 val;
        int i;

        /* GAM 0x4000-0x4770 */
        I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
        I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
        I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
        I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
        I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
                I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
        I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

        I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
        I915_WRITE(GAM_ECOCHK, s->ecochk);
        I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
        I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

        I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

        /* MBC 0x9024-0x91D0, 0x8500 */
        I915_WRITE(VLV_G3DCTL, s->g3dctl);
        I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
        I915_WRITE(GEN6_MBCTL, s->mbctl);

        /* GCP 0x9400-0x9424, 0x8100-0x810C */
        I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
        I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
        I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
        I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
        I915_WRITE(GEN6_RSTCTL, s->rstctl);
        I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

        /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
        I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
        I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
        I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
        I915_WRITE(ECOBUS, s->ecobus);
        I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
        I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
        I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
        I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
        I915_WRITE(VLV_RCEDATA, s->rcedata);
        I915_WRITE(VLV_SPAREG2H, s->spare2gh);

        /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
        I915_WRITE(GTIMR, s->gt_imr);
        I915_WRITE(GTIER, s->gt_ier);
        I915_WRITE(GEN6_PMIMR, s->pm_imr);
        I915_WRITE(GEN6_PMIER, s->pm_ier);

        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
                I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

        /* GT SA CZ domain, 0x100000-0x138124 */
        I915_WRITE(TILECTL, s->tilectl);
        I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
        /*
         * Preserve the GT allow wake and GFX force clock bits; they are not
         * restored here, as they are used to control the s0ix suspend/resume
         * sequence by the caller.
         */
        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= VLV_GTLC_ALLOWWAKEREQ;
        val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= VLV_GFX_CLK_FORCE_ON_BIT;
        val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

        /* Gunit-Display CZ domain, 0x182028-0x1821CF */
        I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
        I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
        I915_WRITE(VLV_PCBR, s->pcbr);
        I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
        u32 val;
        int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
        if (force_on)
                val |= VLV_GFX_CLK_FORCE_ON_BIT;
        I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

        if (!force_on)
                return 0;

        err = wait_for(COND, 20);
        if (err)
                DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
                          I915_READ(VLV_GTLC_SURVIVABILITY_REG));

        return err;
#undef COND
}
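
/*
 * vlv_force_gfx_clock() and the helpers below share a local COND idiom:
 * wait_for(COND, ms) is a polling macro that keeps re-evaluating its
 * condition expression until it holds or the timeout (in milliseconds)
 * expires, returning -ETIMEDOUT on failure. The condition therefore has to
 * be an expression macro rather than a value computed once up front, and
 * each helper #undefs its COND to avoid leaking the definition.
 */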
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
        u32 val;
        int err;

        val = I915_READ(VLV_GTLC_WAKE_CTRL);
        val &= ~VLV_GTLC_ALLOWWAKEREQ;
        if (allow)
                val |= VLV_GTLC_ALLOWWAKEREQ;
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
        POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
              allow)
        err = wait_for(COND, 1);
        if (err)
                DRM_ERROR("timeout disabling GT waking\n");

        return err;
#undef COND
}
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
                                 bool wait_for_on)
{
        u32 mask;
        u32 val;
        int err;

        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
        if (COND)
                return 0;

        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
                      onoff(wait_for_on),
                      I915_READ(VLV_GTLC_PW_STATUS));

        /*
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
        err = wait_for(COND, 3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
                          onoff(wait_for_on));

        return err;
#undef COND
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
        if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
                return;

        DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
        I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
        u32 mask;
        int err;

        /*
         * Bspec defines the following GT well on flags as debug only, so
         * don't treat them as hard failures.
         */
        (void)vlv_wait_for_gt_wells(dev_priv, false);

        mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
        WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

        vlv_check_no_gt_access(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, true);
        if (err)
                goto err1;

        err = vlv_allow_gt_wake(dev_priv, false);
        if (err)
                goto err2;

        if (!IS_CHERRYVIEW(dev_priv))
                vlv_save_gunit_s0ix_state(dev_priv);

        err = vlv_force_gfx_clock(dev_priv, false);
        if (err)
                goto err2;

        return 0;

err2:
        /* For safety always re-enable waking and disable gfx clock forcing */
        vlv_allow_gt_wake(dev_priv, true);
err1:
        vlv_force_gfx_clock(dev_priv, false);

        return err;
}
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                              bool rpm_resume)
{
        struct drm_device *dev = dev_priv->dev;
        int err;
        int ret;

        /*
         * If any of the steps fail just try to continue, that's the best we
         * can do at this point. Return the first error code (which will also
         * leave RPM permanently disabled).
         */
        ret = vlv_force_gfx_clock(dev_priv, true);

        if (!IS_CHERRYVIEW(dev_priv))
                vlv_restore_gunit_s0ix_state(dev_priv);

        err = vlv_allow_gt_wake(dev_priv, true);
        if (!ret)
                ret = err;

        err = vlv_force_gfx_clock(dev_priv, false);
        if (!ret)
                ret = err;

        vlv_check_no_gt_access(dev_priv);

        if (rpm_resume) {
                intel_init_clock_gating(dev);
                i915_gem_restore_fences(dev);
        }

        return ret;
}
static int intel_runtime_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
                return -ENODEV;

        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
                return -ENODEV;

        DRM_DEBUG_KMS("Suspending device\n");

        /*
         * We could deadlock here in case another thread holding struct_mutex
         * calls RPM suspend concurrently, since the RPM suspend will wait
         * first for this RPM suspend to finish. In this case the concurrent
         * RPM resume will be followed by its RPM suspend counterpart. Still
         * for consistency return -EAGAIN, which will reschedule this suspend.
         */
        if (!mutex_trylock(&dev->struct_mutex)) {
                DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
                /*
                 * Bump the expiration timestamp, otherwise the suspend won't
                 * be rescheduled.
                 */
                pm_runtime_mark_last_busy(device);

                return -EAGAIN;
        }

        disable_rpm_wakeref_asserts(dev_priv);

        /*
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

        intel_guc_suspend(dev);

        intel_suspend_gt_powersave(dev_priv);
        intel_runtime_pm_disable_interrupts(dev_priv);

        ret = 0;
        if (IS_BROXTON(dev_priv)) {
                bxt_display_core_uninit(dev_priv);
                bxt_enable_dc9(dev_priv);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                hsw_enable_pc8(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                ret = vlv_suspend_complete(dev_priv);
        }

        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
                intel_runtime_pm_enable_interrupts(dev_priv);

                enable_rpm_wakeref_asserts(dev_priv);

                return ret;
        }

        intel_uncore_forcewake_reset(dev_priv, false);

        enable_rpm_wakeref_asserts(dev_priv);
        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

        if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
                DRM_ERROR("Unclaimed access detected prior to suspending\n");

        dev_priv->pm.suspended = true;

        /*
         * FIXME: We really should find a document that references the arguments
         * used below!
         */
        if (IS_BROADWELL(dev_priv)) {
                /*
                 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
                 * being detected, and the call we do at intel_runtime_resume()
                 * won't be able to restore them. Since PCI_D3hot matches the
                 * actual specification and appears to be working, use it.
                 */
                intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
        } else {
                /*
                 * current versions of firmware which depend on this opregion
                 * notification have repurposed the D1 definition to mean
                 * "runtime suspended" vs. what you would normally expect (D3)
                 * to distinguish it from notifications that might be sent via
                 * the suspend path.
                 */
                intel_opregion_notify_adapter(dev_priv, PCI_D1);
        }

        assert_forcewakes_inactive(dev_priv);

        DRM_DEBUG_KMS("Device suspended\n");
        return 0;
}
static int intel_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;

        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
                return -ENODEV;

        DRM_DEBUG_KMS("Resuming device\n");

        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
        disable_rpm_wakeref_asserts(dev_priv);

        intel_opregion_notify_adapter(dev_priv, PCI_D0);
        dev_priv->pm.suspended = false;
        if (intel_uncore_unclaimed_mmio(dev_priv))
                DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

        intel_guc_resume(dev);

        if (IS_GEN6(dev_priv))
                intel_init_pch_refclk(dev);

        if (IS_BROXTON(dev)) {
                bxt_disable_dc9(dev_priv);
                bxt_display_core_init(dev_priv, true);
                if (dev_priv->csr.dmc_payload &&
                    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
                        gen9_enable_dc5(dev_priv);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                hsw_disable_pc8(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                ret = vlv_resume_prepare(dev_priv, true);
        }

        /*
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev);
        gen6_update_ring_freq(dev_priv);

        intel_runtime_pm_enable_interrupts(dev_priv);

        /*
         * On VLV/CHV display interrupts are part of the display
         * power well, so hpd is reinitialized from there. For
         * everyone else do it here.
         */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_init(dev_priv);

        intel_enable_gt_powersave(dev_priv);

        enable_rpm_wakeref_asserts(dev_priv);

        if (ret)
                DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
        else
                DRM_DEBUG_KMS("Device resumed\n");

        return ret;
}
static const struct dev_pm_ops i915_pm_ops = {
        /*
         * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
         * PMSG_RESUME]
         */
        .suspend = i915_pm_suspend,
        .suspend_late = i915_pm_suspend_late,
        .resume_early = i915_pm_resume_early,
        .resume = i915_pm_resume,

        /*
         * S4 event handlers
         * @freeze, @freeze_late    : called (1) before creating the
         *                            hibernation image [PMSG_FREEZE] and
         *                            (2) after rebooting, before restoring
         *                            the image [PMSG_QUIESCE]
         * @thaw, @thaw_early       : called (1) after creating the hibernation
         *                            image, before writing it [PMSG_THAW]
         *                            and (2) after failing to create or
         *                            restore the image [PMSG_RECOVER]
         * @poweroff, @poweroff_late: called after writing the hibernation
         *                            image, before rebooting [PMSG_HIBERNATE]
         * @restore, @restore_early : called after rebooting and restoring the
         *                            hibernation image [PMSG_RESTORE]
         */
        .freeze = i915_pm_freeze,
        .freeze_late = i915_pm_freeze_late,
        .thaw_early = i915_pm_thaw_early,
        .thaw = i915_pm_thaw,
        .poweroff = i915_pm_suspend,
        .poweroff_late = i915_pm_poweroff_late,
        .restore_early = i915_pm_restore_early,
        .restore = i915_pm_restore,

        /* S0ix (via runtime suspend) event handlers */
        .runtime_suspend = intel_runtime_suspend,
        .runtime_resume = intel_runtime_resume,
};
static const struct vm_operations_struct i915_gem_vm_ops = {
        .fault = i915_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
#ifdef CONFIG_COMPAT
        .compat_ioctl = i915_compat_ioctl,
#endif
        .llseek = noop_llseek,
};
static struct drm_driver driver = {
        /*
         * Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
         */
        .driver_features =
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
            DRIVER_RENDER | DRIVER_MODESET,
        .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .preclose = i915_driver_preclose,
        .postclose = i915_driver_postclose,
        .set_busid = drm_pci_set_busid,

        .gem_free_object = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = i915_gem_prime_export,
        .gem_prime_import = i915_gem_prime_import,

        .dumb_create = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i915_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
        .probe = i915_pci_probe,
        .remove = i915_pci_remove,
        .driver.pm = &i915_pm_ops,
};
static int __init i915_init(void)
{
        driver.num_ioctls = i915_max_ioctl;

        /*
         * Enable KMS by default, unless explicitly overridden by
         * either the i915.modeset parameter or by the
         * vga_text_mode_force boot option.
         */
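
        /*
         * i915.modeset is a tristate: 1 forces KMS on, 0 disables it, and
         * the default of -1 leaves the decision to the checks below, so
         * only an explicit 0, or the unforced default combined with
         * vgacon_text_force() (the nomodeset boot handling), ends up
         * clearing DRIVER_MODESET.
         */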
        if (i915.modeset == 0)
                driver.driver_features &= ~DRIVER_MODESET;

        if (vgacon_text_force() && i915.modeset == -1)
                driver.driver_features &= ~DRIVER_MODESET;

        if (!(driver.driver_features & DRIVER_MODESET)) {
                /* Silently fail loading to not upset userspace. */
                DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
                return 0;
        }

        if (i915.nuclear_pageflip)
                driver.driver_features |= DRIVER_ATOMIC;

        return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
        if (!(driver.driver_features & DRIVER_MODESET))
                return; /* Never loaded a driver. */

        drm_pci_exit(&driver, &i915_pci_driver);
}

module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");