Merge tag 'v3.14-rc6' into drm-intel-next-queued
author Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 10 Mar 2014 20:43:46 +0000 (21:43 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Mon, 10 Mar 2014 20:43:46 +0000 (21:43 +0100)
Linux 3.14-rc6

I need the hdmi/dvi dual-link fixes in 3.14 to avoid ugly conflicts
when merging Ville's new hdmi cloning support into my -next tree.

Conflicts:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/intel_dp.c

The Makefile cleanup conflicts with an ACPI build fix; the intel_dp.c
conflict is trivial.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
14 files changed:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
include/drm/drm_crtc.h

@@@ -3,70 -3,57 +3,69 @@@
  # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
  
  ccflags-y := -Iinclude/drm
 -i915-y := i915_drv.o i915_dma.o i915_irq.o \
 -        i915_gpu_error.o \
 +
 +# Please keep these build lists sorted!
 +
 +# core driver code
 +i915-y := i915_drv.o \
 +        i915_params.o \
            i915_suspend.o \
 -        i915_gem.o \
 +        i915_sysfs.o \
 +        intel_pm.o
 +i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 +i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 +
 +# GEM code
 +i915-y += i915_cmd_parser.o \
          i915_gem_context.o \
          i915_gem_debug.o \
 +        i915_gem_dmabuf.o \
          i915_gem_evict.o \
          i915_gem_execbuffer.o \
          i915_gem_gtt.o \
 +        i915_gem.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
 -        i915_sysfs.o \
 +        i915_gpu_error.o \
 +        i915_irq.o \
          i915_trace_points.o \
 -        i915_ums.o \
 +        intel_ringbuffer.o \
 +        intel_uncore.o
 +
 +# modesetting core code
 +i915-y += intel_bios.o \
          intel_display.o \
 -        intel_crt.o \
 -        intel_lvds.o \
 -        intel_dsi.o \
 -        intel_dsi_cmd.o \
 -        intel_dsi_pll.o \
 -        intel_bios.o \
 -        intel_ddi.o \
 -        intel_dp.o \
 -        intel_hdmi.o \
 -        intel_sdvo.o \
          intel_modes.o \
-         intel_opregion.o \
 -        intel_panel.o \
 -        intel_pm.o \
 -        intel_i2c.o \
 -        intel_tv.o \
 -        intel_dvo.o \
 -        intel_ringbuffer.o \
          intel_overlay.o \
 -        intel_sprite.o \
          intel_sideband.o \
 -        intel_uncore.o \
 +        intel_sprite.o
- i915-$(CONFIG_ACPI)           += intel_acpi.o
++i915-$(CONFIG_ACPI)           += intel_acpi.o intel_opregion.o
 +i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
 +
 +# modesetting output/encoder code
 +i915-y += dvo_ch7017.o \
          dvo_ch7xxx.o \
 -        dvo_ch7017.o \
          dvo_ivch.o \
 -        dvo_tfp410.o \
 -        dvo_sil164.o \
          dvo_ns2501.o \
 -        i915_gem_dmabuf.o
 -
 -i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 -
 -i915-$(CONFIG_ACPI)   += intel_acpi.o intel_opregion.o
 -
 -i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
 +        dvo_sil164.o \
 +        dvo_tfp410.o \
 +        intel_crt.o \
 +        intel_ddi.o \
 +        intel_dp.o \
 +        intel_dsi_cmd.o \
 +        intel_dsi.o \
 +        intel_dsi_pll.o \
 +        intel_dvo.o \
 +        intel_hdmi.o \
 +        intel_i2c.o \
 +        intel_lvds.o \
 +        intel_panel.o \
 +        intel_sdvo.o \
 +        intel_tv.o
  
 -i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 +# legacy horrors
 +i915-y += i915_dma.o \
 +        i915_ums.o
  
  obj-$(CONFIG_DRM_I915)  += i915.o
  
  #include <linux/module.h>
  #include <drm/drm_crtc_helper.h>
  
 -static int i915_modeset __read_mostly = -1;
 -module_param_named(modeset, i915_modeset, int, 0400);
 -MODULE_PARM_DESC(modeset,
 -              "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
 -              "1=on, -1=force vga console preference [default])");
 -
 -unsigned int i915_fbpercrtc __always_unused = 0;
 -module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 -
 -int i915_panel_ignore_lid __read_mostly = 1;
 -module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
 -MODULE_PARM_DESC(panel_ignore_lid,
 -              "Override lid status (0=autodetect, 1=autodetect disabled [default], "
 -              "-1=force lid closed, -2=force lid open)");
 -
 -unsigned int i915_powersave __read_mostly = 1;
 -module_param_named(powersave, i915_powersave, int, 0600);
 -MODULE_PARM_DESC(powersave,
 -              "Enable powersavings, fbc, downclocking, etc. (default: true)");
 -
 -int i915_semaphores __read_mostly = -1;
 -module_param_named(semaphores, i915_semaphores, int, 0400);
 -MODULE_PARM_DESC(semaphores,
 -              "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 -
 -int i915_enable_rc6 __read_mostly = -1;
 -module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
 -MODULE_PARM_DESC(i915_enable_rc6,
 -              "Enable power-saving render C-state 6. "
 -              "Different stages can be selected via bitmask values "
 -              "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
 -              "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
 -              "default: -1 (use per-chip default)");
 -
 -int i915_enable_fbc __read_mostly = -1;
 -module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 -MODULE_PARM_DESC(i915_enable_fbc,
 -              "Enable frame buffer compression for power savings "
 -              "(default: -1 (use per-chip default))");
 -
 -unsigned int i915_lvds_downclock __read_mostly = 0;
 -module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 -MODULE_PARM_DESC(lvds_downclock,
 -              "Use panel (LVDS/eDP) downclocking for power savings "
 -              "(default: false)");
 -
 -int i915_lvds_channel_mode __read_mostly;
 -module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
 -MODULE_PARM_DESC(lvds_channel_mode,
 -               "Specify LVDS channel mode "
 -               "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
 -
 -int i915_panel_use_ssc __read_mostly = -1;
 -module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
 -MODULE_PARM_DESC(lvds_use_ssc,
 -              "Use Spread Spectrum Clock with panels [LVDS/eDP] "
 -              "(default: auto from VBT)");
 -
 -int i915_vbt_sdvo_panel_type __read_mostly = -1;
 -module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
 -MODULE_PARM_DESC(vbt_sdvo_panel_type,
 -              "Override/Ignore selection of SDVO panel mode in the VBT "
 -              "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 -
 -static bool i915_try_reset __read_mostly = true;
 -module_param_named(reset, i915_try_reset, bool, 0600);
 -MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 -
 -bool i915_enable_hangcheck __read_mostly = true;
 -module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
 -MODULE_PARM_DESC(enable_hangcheck,
 -              "Periodically check GPU activity for detecting hangs. "
 -              "WARNING: Disabling this can cause system wide hangs. "
 -              "(default: true)");
 -
 -int i915_enable_ppgtt __read_mostly = -1;
 -module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
 -MODULE_PARM_DESC(i915_enable_ppgtt,
 -              "Enable PPGTT (default: true)");
 -
 -int i915_enable_psr __read_mostly = 0;
 -module_param_named(enable_psr, i915_enable_psr, int, 0600);
 -MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 -
 -unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
 -module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 -MODULE_PARM_DESC(preliminary_hw_support,
 -              "Enable preliminary hardware support.");
 -
 -int i915_disable_power_well __read_mostly = 1;
 -module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
 -MODULE_PARM_DESC(disable_power_well,
 -               "Disable the power well when possible (default: true)");
 -
 -int i915_enable_ips __read_mostly = 1;
 -module_param_named(enable_ips, i915_enable_ips, int, 0600);
 -MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 -
 -bool i915_fastboot __read_mostly = 0;
 -module_param_named(fastboot, i915_fastboot, bool, 0600);
 -MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
 -               "(default: false)");
 -
 -int i915_enable_pc8 __read_mostly = 1;
 -module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
 -MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
 -
 -int i915_pc8_timeout __read_mostly = 5000;
 -module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
 -MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
 -
 -bool i915_prefault_disable __read_mostly;
 -module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
 -MODULE_PARM_DESC(prefault_disable,
 -              "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
 -
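All of the parameters deleted here move into the new i915_params.o object added
to the Makefile above, collected in the struct i915_params declared later in
this diff. A minimal sketch of the consolidated style, assuming that layout for
i915_params.c:

    /* sketch only; assumes the struct i915_params shown in i915_drv.h below */
    struct i915_params i915 __read_mostly = {
            .modeset = -1,
            .semaphores = -1,
            .enable_rc6 = -1,
            .reset = true,
    };
    module_param_named(modeset, i915.modeset, int, 0400);
    MODULE_PARM_DESC(modeset,
            "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
            "1=on, -1=force vga console preference [default])");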
  static struct drm_driver driver;
  
 +#define GEN_DEFAULT_PIPEOFFSETS \
 +      .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
 +                        PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
 +      .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
 +                         TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
 +      .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
 +      .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
 +      .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
 +
 +
  static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_i85x_info = {
        .has_overlay = 1, .overlay_needs_physical = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_i965g_info = {
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_i965gm_info = {
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_g33_info = {
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_gm45_info = {
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_ironlake_m_info = {
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_sandybridge_d_info = {
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_sandybridge_m_info = {
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  #define GEN7_FEATURES  \
  static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_valleyview_m_info = {
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_valleyview_d_info = {
        .display_mmio_offset = VLV_DISPLAY_BASE,
        .has_fbc = 0, /* legal, last one wins */
        .has_llc = 0, /* legal, last one wins */
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_haswell_d_info = {
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_haswell_m_info = {
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_broadwell_d_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
 +      .has_fbc = 1,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  static const struct intel_device_info intel_broadwell_m_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
 +      .has_fbc = 1,
 +      GEN_DEFAULT_PIPEOFFSETS,
  };
  
  /*
@@@ -326,7 -403,7 +326,7 @@@ MODULE_DEVICE_TABLE(pci, pciidlist)
  void intel_detect_pch(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct pci_dev *pch;
+       struct pci_dev *pch = NULL;
  
        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         * all the ISA bridge devices and check for the first match, instead
         * of only checking the first one.
         */
-       pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-       while (pch) {
-               struct pci_dev *curr = pch;
+       while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-                       unsigned short id;
-                       id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+                       unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;
  
                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev));
                                WARN_ON(!IS_ULT(dev));
-                       } else {
-                               goto check_next;
-                       }
-                       pci_dev_put(pch);
+                       } else
+                               continue;
                        break;
                }
- check_next:
-               pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
-               pci_dev_put(curr);
        }
        if (!pch)
-               DRM_DEBUG_KMS("No PCH found?\n");
+               DRM_DEBUG_KMS("No PCH found.\n");
+       pci_dev_put(pch);
  }
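The rewritten loop leans on the reference-counting contract of pci_get_class():
each call drops the reference on the device passed in and returns the next
match with a fresh reference held, so a single pci_dev_put() after the loop
suffices and is a no-op when the loop ran to completion (pch is then NULL).
A minimal sketch of the idiom, with interesting() a hypothetical predicate:

    #include <linux/pci.h>

    struct pci_dev *dev = NULL;

    /* pci_get_class() releases 'dev' and returns the next match, referenced */
    while ((dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, dev))) {
            if (interesting(dev))
                    break;          /* exit still holding the reference */
    }
    pci_dev_put(dev);               /* safe on NULL */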
  
  bool i915_semaphore_is_enabled(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6)
                return false;
  
 +      if (i915.semaphores >= 0)
 +              return i915.semaphores;
 +
        /* Until we get further testing... */
 -      if (IS_GEN8(dev)) {
 -              WARN_ON(!i915_preliminary_hw_support);
 +      if (IS_GEN8(dev))
                return false;
 -      }
 -
 -      if (i915_semaphores >= 0)
 -              return i915_semaphores;
  
  #ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
@@@ -434,7 -508,7 +429,7 @@@ static int i915_drm_freeze(struct drm_d
        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        hsw_disable_package_c8(dev_priv);
 -      intel_display_set_init_power(dev, true);
 +      intel_display_set_init_power(dev_priv, true);
  
        drm_kms_helper_poll_disable(dev);
  
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
        console_unlock();
  
 +      dev_priv->suspend_count++;
 +
        return 0;
  }
  
@@@ -558,7 -630,7 +553,7 @@@ static int __i915_drm_thaw(struct drm_d
                mutex_unlock(&dev->struct_mutex);
        }
  
 -      intel_power_domains_init_hw(dev);
 +      intel_power_domains_init_hw(dev_priv);
  
        i915_restore_state(dev);
        intel_opregion_setup(dev);
        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_init_pch_refclk(dev);
 +              drm_mode_config_reset(dev);
  
                mutex_lock(&dev->struct_mutex);
  
                intel_modeset_init_hw(dev);
  
                drm_modeset_lock_all(dev);
 -              drm_mode_config_reset(dev);
                intel_modeset_setup_hw_state(dev, true);
                drm_modeset_unlock_all(dev);
  
@@@ -675,7 -747,7 +670,7 @@@ int i915_reset(struct drm_device *dev
        bool simulated;
        int ret;
  
 -      if (!i915_try_reset)
 +      if (!i915.reset)
                return 0;
  
        mutex_lock(&dev->struct_mutex);
  
                drm_irq_uninstall(dev);
                drm_irq_install(dev);
 +
 +              /* rps/rc6 re-init is necessary to restore state lost after the
 +               * reset and the re-install of drm irq. Skip for ironlake per
 +               * previous concerns that it doesn't respond well to some forms
 +               * of re-init after reset. */
 +              if (INTEL_INFO(dev)->gen > 5) {
 +                      mutex_lock(&dev->struct_mutex);
 +                      intel_enable_gt_powersave(dev);
 +                      mutex_unlock(&dev->struct_mutex);
 +              }
 +
                intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
@@@ -754,7 -815,7 +749,7 @@@ static int i915_pci_probe(struct pci_de
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;
  
 -      if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
 +      if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
                DRM_INFO("This hardware requires preliminary hardware support.\n"
                         "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
                return -ENODEV;
@@@ -849,7 -910,6 +844,7 @@@ static int i915_runtime_suspend(struct 
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        WARN_ON(!HAS_RUNTIME_PM(dev));
 +      assert_force_wake_inactive(dev_priv);
  
        DRM_DEBUG_KMS("Suspending device\n");
  
@@@ -986,14 -1046,14 +981,14 @@@ static int __init i915_init(void
         * the default behavior.
         */
  #if defined(CONFIG_DRM_I915_KMS)
 -      if (i915_modeset != 0)
 +      if (i915.modeset != 0)
                driver.driver_features |= DRIVER_MODESET;
  #endif
 -      if (i915_modeset == 1)
 +      if (i915.modeset == 1)
                driver.driver_features |= DRIVER_MODESET;
  
  #ifdef CONFIG_VGA_CONSOLE
 -      if (vgacon_text_force() && i915_modeset == -1)
 +      if (vgacon_text_force() && i915.modeset == -1)
                driver.driver_features &= ~DRIVER_MODESET;
  #endif
  
@@@ -58,8 -58,7 +58,8 @@@ enum pipe 
        PIPE_A = 0,
        PIPE_B,
        PIPE_C,
 -      I915_MAX_PIPES
 +      _PIPE_EDP,
 +      I915_MAX_PIPES = _PIPE_EDP
  };
  #define pipe_name(p) ((p) + 'A')
  
@@@ -67,8 -66,7 +67,8 @@@ enum transcoder 
        TRANSCODER_A = 0,
        TRANSCODER_B,
        TRANSCODER_C,
 -      TRANSCODER_EDP = 0xF,
 +      TRANSCODER_EDP,
 +      I915_MAX_TRANSCODERS
  };
  #define transcoder_name(t) ((t) + 'A')
  
@@@ -79,7 -77,7 +79,7 @@@ enum plane 
  };
  #define plane_name(p) ((p) + 'A')
  
 -#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
 +#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
  
  enum port {
        PORT_A = 0,
@@@ -114,17 -112,6 +114,17 @@@ enum intel_display_power_domain 
        POWER_DOMAIN_TRANSCODER_B,
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,
 +      POWER_DOMAIN_PORT_DDI_A_2_LANES,
 +      POWER_DOMAIN_PORT_DDI_A_4_LANES,
 +      POWER_DOMAIN_PORT_DDI_B_2_LANES,
 +      POWER_DOMAIN_PORT_DDI_B_4_LANES,
 +      POWER_DOMAIN_PORT_DDI_C_2_LANES,
 +      POWER_DOMAIN_PORT_DDI_C_4_LANES,
 +      POWER_DOMAIN_PORT_DDI_D_2_LANES,
 +      POWER_DOMAIN_PORT_DDI_D_4_LANES,
 +      POWER_DOMAIN_PORT_DSI,
 +      POWER_DOMAIN_PORT_CRT,
 +      POWER_DOMAIN_PORT_OTHER,
        POWER_DOMAIN_VGA,
        POWER_DOMAIN_AUDIO,
        POWER_DOMAIN_INIT,
        POWER_DOMAIN_NUM,
  };
  
 -#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 -
  #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
  #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
                ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
        ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
         (tran) + POWER_DOMAIN_TRANSCODER_A)
  
 -#define HSW_ALWAYS_ON_POWER_DOMAINS (         \
 -      BIT(POWER_DOMAIN_PIPE_A) |              \
 -      BIT(POWER_DOMAIN_TRANSCODER_EDP))
 -#define BDW_ALWAYS_ON_POWER_DOMAINS (         \
 -      BIT(POWER_DOMAIN_PIPE_A) |              \
 -      BIT(POWER_DOMAIN_TRANSCODER_EDP) |      \
 -      BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
 -
  enum hpd_pin {
        HPD_NONE = 0,
        HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
         I915_GEM_DOMAIN_VERTEX)
  
  #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
 +#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
  
  #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
                if ((intel_encoder)->base.crtc == (__crtc))
  
 +#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
 +      list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
 +              if ((intel_connector)->base.encoder == (__encoder))
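A short usage sketch of the new iterators (pipe and sprite are plain ints;
dev is the struct drm_device pointer the macros pick up from scope):

    int pipe, sprite;

    for_each_pipe(pipe)
            for_each_sprite(pipe, sprite)
                    DRM_DEBUG_KMS("sprite %c\n", sprite_name(pipe, sprite));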
 +
  struct drm_i915_private;
  
  enum intel_dpll_id {
@@@ -303,87 -295,53 +303,87 @@@ struct intel_display_error_state
  
  struct drm_i915_error_state {
        struct kref ref;
 +      struct timeval time;
 +
 +      char error_msg[128];
 +      u32 reset_count;
 +      u32 suspend_count;
 +
 +      /* Generic register state */
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
 -      bool waiting[I915_NUM_RINGS];
 -      u32 pipestat[I915_MAX_PIPES];
 -      u32 tail[I915_NUM_RINGS];
 -      u32 head[I915_NUM_RINGS];
 -      u32 ctl[I915_NUM_RINGS];
 -      u32 ipeir[I915_NUM_RINGS];
 -      u32 ipehr[I915_NUM_RINGS];
 -      u32 instdone[I915_NUM_RINGS];
 -      u32 acthd[I915_NUM_RINGS];
 -      u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
 -      u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
 -      u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
 -      /* our own tracking of ring head and tail */
 -      u32 cpu_ring_head[I915_NUM_RINGS];
 -      u32 cpu_ring_tail[I915_NUM_RINGS];
        u32 error; /* gen6+ */
        u32 err_int; /* gen7 */
 -      u32 bbstate[I915_NUM_RINGS];
 -      u32 instpm[I915_NUM_RINGS];
 -      u32 instps[I915_NUM_RINGS];
 -      u32 extra_instdone[I915_NUM_INSTDONE_REG];
 -      u32 seqno[I915_NUM_RINGS];
 -      u64 bbaddr[I915_NUM_RINGS];
 -      u32 fault_reg[I915_NUM_RINGS];
        u32 done_reg;
 -      u32 faddr[I915_NUM_RINGS];
 +      u32 gac_eco;
 +      u32 gam_ecochk;
 +      u32 gab_ctl;
 +      u32 gfx_mode;
 +      u32 extra_instdone[I915_NUM_INSTDONE_REG];
 +      u32 pipestat[I915_MAX_PIPES];
        u64 fence[I915_MAX_NUM_FENCES];
 -      struct timeval time;
 +      struct intel_overlay_error_state *overlay;
 +      struct intel_display_error_state *display;
 +
        struct drm_i915_error_ring {
                bool valid;
 +              /* Software tracked state */
 +              bool waiting;
 +              int hangcheck_score;
 +              enum intel_ring_hangcheck_action hangcheck_action;
 +              int num_requests;
 +
 +              /* our own tracking of ring head and tail */
 +              u32 cpu_ring_head;
 +              u32 cpu_ring_tail;
 +
 +              u32 semaphore_seqno[I915_NUM_RINGS - 1];
 +
 +              /* Register state */
 +              u32 tail;
 +              u32 head;
 +              u32 ctl;
 +              u32 hws;
 +              u32 ipeir;
 +              u32 ipehr;
 +              u32 instdone;
 +              u32 acthd;
 +              u32 bbstate;
 +              u32 instpm;
 +              u32 instps;
 +              u32 seqno;
 +              u64 bbaddr;
 +              u32 fault_reg;
 +              u32 faddr;
 +              u32 rc_psmi; /* sleep state */
 +              u32 semaphore_mboxes[I915_NUM_RINGS - 1];
 +
                struct drm_i915_error_object {
                        int page_count;
                        u32 gtt_offset;
                        u32 *pages[0];
 -              } *ringbuffer, *batchbuffer, *ctx;
 +              } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 +
                struct drm_i915_error_request {
                        long jiffies;
                        u32 seqno;
                        u32 tail;
                } *requests;
 -              int num_requests;
 +
 +              struct {
 +                      u32 gfx_mode;
 +                      union {
 +                              u64 pdp[4];
 +                              u32 pp_dir_base;
 +                      };
 +              } vm_info;
 +
 +              pid_t pid;
 +              char comm[TASK_COMM_LEN];
        } ring[I915_NUM_RINGS];
        struct drm_i915_error_buffer {
                u32 size;
                s32 ring:4;
                u32 cache_level:3;
        } **active_bo, **pinned_bo;
 +
        u32 *active_bo_count, *pinned_bo_count;
 -      struct intel_overlay_error_state *overlay;
 -      struct intel_display_error_state *display;
 -      int hangcheck_score[I915_NUM_RINGS];
 -      enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
  };
  
  struct intel_connector;
  struct intel_crtc_config;
 +struct intel_plane_config;
  struct intel_crtc;
  struct intel_limit;
  struct dpll;
@@@ -445,8 -405,6 +445,8 @@@ struct drm_i915_display_funcs 
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_config *);
 +      void (*get_plane_config)(struct intel_crtc *,
 +                               struct intel_plane_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
@@@ -511,7 -469,7 +511,7 @@@ struct intel_uncore 
        unsigned fw_rendercount;
        unsigned fw_mediacount;
  
 -      struct delayed_work force_wake_work;
 +      struct timer_list force_wake_timer;
  };
  
  #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
  struct intel_device_info {
        u32 display_mmio_offset;
        u8 num_pipes:3;
 +      u8 num_sprites[I915_MAX_PIPES];
        u8 gen;
        u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
 +      /* Register offsets for the various display pipes and transcoders */
 +      int pipe_offsets[I915_MAX_TRANSCODERS];
 +      int trans_offsets[I915_MAX_TRANSCODERS];
 +      int dpll_offsets[I915_MAX_PIPES];
 +      int dpll_md_offsets[I915_MAX_PIPES];
 +      int palette_offsets[I915_MAX_PIPES];
  };
  
  #undef DEFINE_FLAG
@@@ -573,57 -524,6 +573,57 @@@ enum i915_cache_level 
  
  typedef uint32_t gen6_gtt_pte_t;
  
 +/**
 + * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 + * VMA's presence cannot be guaranteed before binding, or after unbinding the
 + * object into/from the address space.
 + *
 + * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 + * will always be <= an objects lifetime. So object refcounting should cover us.
 + */
 +struct i915_vma {
 +      struct drm_mm_node node;
 +      struct drm_i915_gem_object *obj;
 +      struct i915_address_space *vm;
 +
 +      /** This object's place on the active/inactive lists */
 +      struct list_head mm_list;
 +
 +      struct list_head vma_link; /* Link in the object's VMA list */
 +
 +      /** This vma's place in the batchbuffer or on the eviction list */
 +      struct list_head exec_list;
 +
 +      /**
 +       * Used for performing relocations during execbuffer insertion.
 +       */
 +      struct hlist_node exec_node;
 +      unsigned long exec_handle;
 +      struct drm_i915_gem_exec_object2 *exec_entry;
 +
 +      /**
 +       * How many users have pinned this object in GTT space. The following
 +       * users can each hold at most one reference: pwrite/pread, pin_ioctl
 +       * (via user_pin_count), execbuffer (objects are not allowed multiple
 +       * times for the same batchbuffer), and the framebuffer code. When
 +       * switching/pageflipping, the framebuffer code has at most two buffers
 +       * pinned per crtc.
 +       *
 +       * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
 +       * bits with absolutely no headroom. So use 4 bits. */
 +      unsigned int pin_count:4;
 +#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 +
 +      /** Unmap an object from an address space. This usually consists of
 +       * setting the valid PTE entries to a reserved scratch page. */
 +      void (*unbind_vma)(struct i915_vma *vma);
 +      /* Map an object into an address space with the given cache flags. */
 +#define GLOBAL_BIND (1<<0)
 +      void (*bind_vma)(struct i915_vma *vma,
 +                       enum i915_cache_level cache_level,
 +                       u32 flags);
 +};
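With pin_count and the bind/unbind hooks now living in the VMA, binding is
driven per address space. A hypothetical call site, assuming a vma already
looked up for the global GTT:

    static void example_rebind(struct i915_vma *vma)
    {
            /* map with the object's cache level into the global GTT */
            vma->bind_vma(vma, vma->obj->cache_level, GLOBAL_BIND);
            /* ... use the mapping ... */
            vma->unbind_vma(vma);   /* PTEs point back at scratch */
    }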
 +
  struct i915_address_space {
        struct drm_mm mm;
        struct drm_device *dev;
                                     enum i915_cache_level level,
                                     bool valid); /* Create a valid PTE */
        void (*clear_range)(struct i915_address_space *vm,
 -                          unsigned int first_entry,
 -                          unsigned int num_entries,
 +                          uint64_t start,
 +                          uint64_t length,
                            bool use_scratch);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
 -                             unsigned int first_entry,
 +                             uint64_t start,
                               enum i915_cache_level cache_level);
        void (*cleanup)(struct i915_address_space *vm);
  };
@@@ -703,32 -603,55 +703,32 @@@ struct i915_gtt 
  };
  #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
  
 +#define GEN8_LEGACY_PDPS 4
  struct i915_hw_ppgtt {
        struct i915_address_space base;
 +      struct kref ref;
 +      struct drm_mm_node node;
        unsigned num_pd_entries;
 +      unsigned num_pd_pages; /* gen8+ */
        union {
                struct page **pt_pages;
 -              struct page *gen8_pt_pages;
 +              struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
        };
        struct page *pd_pages;
 -      int num_pd_pages;
 -      int num_pt_pages;
        union {
                uint32_t pd_offset;
 -              dma_addr_t pd_dma_addr[4];
 +              dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
        };
        union {
                dma_addr_t *pt_dma_addr;
                dma_addr_t *gen8_pt_dma_addr[4];
        };
 -      int (*enable)(struct drm_device *dev);
 -};
 -
 -/**
 - * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 - * VMA's presence cannot be guaranteed before binding, or after unbinding the
 - * object into/from the address space.
 - *
 - * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 - * will always be <= an objects lifetime. So object refcounting should cover us.
 - */
 -struct i915_vma {
 -      struct drm_mm_node node;
 -      struct drm_i915_gem_object *obj;
 -      struct i915_address_space *vm;
 -
 -      /** This object's place on the active/inactive lists */
 -      struct list_head mm_list;
 -
 -      struct list_head vma_link; /* Link in the object's VMA list */
 -
 -      /** This vma's place in the batchbuffer or on the eviction list */
 -      struct list_head exec_list;
 -
 -      /**
 -       * Used for performing relocations during execbuffer insertion.
 -       */
 -      struct hlist_node exec_node;
 -      unsigned long exec_handle;
 -      struct drm_i915_gem_exec_object2 *exec_entry;
  
 +      int (*enable)(struct i915_hw_ppgtt *ppgtt);
 +      int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
 +                       struct intel_ring_buffer *ring,
 +                       bool synchronous);
 +      void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
  };
  
  struct i915_ctx_hang_stats {
@@@ -753,10 -676,9 +753,10 @@@ struct i915_hw_context 
        bool is_initialized;
        uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
 -      struct intel_ring_buffer *ring;
 +      struct intel_ring_buffer *last_ring;
        struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;
 +      struct i915_address_space *vm;
  
        struct list_head link;
  };
@@@ -909,7 -831,11 +909,7 @@@ struct i915_suspend_saved_registers 
        u32 savePFIT_CONTROL;
        u32 save_palette_a[256];
        u32 save_palette_b[256];
 -      u32 saveDPFC_CB_BASE;
 -      u32 saveFBC_CFB_BASE;
 -      u32 saveFBC_LL_BASE;
        u32 saveFBC_CONTROL;
 -      u32 saveFBC_CONTROL2;
        u32 saveIER;
        u32 saveIIR;
        u32 saveIMR;
@@@ -979,6 -905,8 +979,6 @@@ struct intel_gen6_power_mgmt 
        struct work_struct work;
        u32 pm_iir;
  
 -      /* The below variables an all the rps hw state are protected by
 -       * dev->struct mutext. */
        u8 cur_delay;
        u8 min_delay;
        u8 max_delay;
        u8 rp0_delay;
        u8 hw_max;
  
 +      bool rp_up_masked;
 +      bool rp_down_masked;
 +
        int last_adj;
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
  
@@@ -1028,36 -953,6 +1028,36 @@@ struct intel_ilk_power_mgmt 
        struct drm_i915_gem_object *renderctx;
  };
  
 +struct drm_i915_private;
 +struct i915_power_well;
 +
 +struct i915_power_well_ops {
 +      /*
 +       * Synchronize the well's hw state to match the current sw state, for
 +       * example enable/disable it based on the current refcount. Called
 +       * during driver init and resume time, possibly after first calling
 +       * the enable/disable handlers.
 +       */
 +      void (*sync_hw)(struct drm_i915_private *dev_priv,
 +                      struct i915_power_well *power_well);
 +      /*
 +       * Enable the well and resources that depend on it (for example
 +       * interrupts located on the well). Called after the 0->1 refcount
 +       * transition.
 +       */
 +      void (*enable)(struct drm_i915_private *dev_priv,
 +                     struct i915_power_well *power_well);
 +      /*
 +       * Disable the well and resources that depend on it. Called after
 +       * the 1->0 refcount transition.
 +       */
 +      void (*disable)(struct drm_i915_private *dev_priv,
 +                      struct i915_power_well *power_well);
 +      /* Returns the hw enabled state. */
 +      bool (*is_enabled)(struct drm_i915_private *dev_priv,
 +                         struct i915_power_well *power_well);
 +};
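Each platform can then provide a static ops table and attach it to its wells;
a sketch in which the handler names are assumptions, not the series' actual
symbols:

    static const struct i915_power_well_ops hsw_power_well_ops = {
            .sync_hw    = hsw_power_well_sync_hw,
            .enable     = hsw_power_well_enable,
            .disable    = hsw_power_well_disable,
            .is_enabled = hsw_power_well_enabled,
    };

    /* wired into a well definition via .ops = &hsw_power_well_ops */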
 +
  /* Power well structure for haswell */
  struct i915_power_well {
        const char *name;
        /* power well enable/disable usage count */
        int count;
        unsigned long domains;
 -      void *data;
 -      void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
 -                  bool enable);
 -      bool (*is_enabled)(struct drm_device *dev,
 -                         struct i915_power_well *power_well);
 +      unsigned long data;
 +      const struct i915_power_well_ops *ops;
  };
  
  struct i915_power_domains {
@@@ -1163,14 -1061,6 +1163,14 @@@ struct i915_gem_mm 
         */
        bool interruptible;
  
 +      /**
 +       * Is the GPU currently considered idle, or busy executing userspace
 +       * requests?  Whilst idle, we attempt to power down the hardware and
 +       * display clocks. In order to reduce the effect on performance, there
 +       * is a slight delay before we do so.
 +       */
 +      bool busy;
 +
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
@@@ -1360,10 -1250,11 +1360,10 @@@ struct ilk_wm_values 
   * Ideally every piece of our code that needs PC8+ disabled would call
   * hsw_disable_package_c8, which would increment disable_count and prevent the
   * system from reaching PC8+. But we don't have a symmetric way to do this for
 - * everything, so we have the requirements_met and gpu_idle variables. When we
 - * switch requirements_met or gpu_idle to true we decrease disable_count, and
 - * increase it in the opposite case. The requirements_met variable is true when
 - * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 - * variable is true when the GPU is idle.
 + * everything, so we have the requirements_met variable. When we switch
 + * requirements_met to true we decrease disable_count, and increase it in the
 + * opposite case. The requirements_met variable is true when all the CRTCs,
 + * encoders and the power well are disabled.
   *
   * In addition to everything, we only actually enable PC8+ if disable_count
   * stays at zero for at least some seconds. This is implemented with the
   */
  struct i915_package_c8 {
        bool requirements_met;
 -      bool gpu_idle;
        bool irqs_disabled;
        /* Only true after the delayed work task actually enables it. */
        bool enabled;
@@@ -1440,7 -1332,7 +1440,7 @@@ typedef struct drm_i915_private 
        struct drm_device *dev;
        struct kmem_cache *slab;
  
 -      const struct intel_device_info *info;
 +      const struct intel_device_info info;
  
        int relative_constants_mode;
  
        drm_dma_handle_t *status_page_dmah;
        struct resource mch_res;
  
 -      atomic_t irq_received;
 -
        /* protects the irq masks */
        spinlock_t irq_lock;
  
 +      bool display_irqs_enabled;
 +
        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
        struct pm_qos_request pm_qos;
  
        };
        u32 gt_irq_mask;
        u32 pm_irq_mask;
 +      u32 pipestat_irq_mask[I915_MAX_PIPES];
  
        struct work_struct hotplug_work;
        bool enable_hotplug_processing;
        u32 hpd_event_bits;
        struct timer_list hotplug_reenable_timer;
  
 -      int num_plane;
 -
        struct i915_fbc fbc;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
  
        struct sdvo_device_mapping sdvo_mappings[2];
  
 -      struct drm_crtc *plane_to_crtc_mapping[3];
 -      struct drm_crtc *pipe_to_crtc_mapping[3];
 +      struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
 +      struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
        wait_queue_head_t pending_flip_queue;
  
  #ifdef CONFIG_DEBUG_FS
  
        u32 fdi_rx_config;
  
 +      u32 suspend_count;
        struct i915_suspend_saved_registers regfile;
  
        struct {
@@@ -1735,6 -1627,18 +1735,6 @@@ struct drm_i915_gem_object 
         */
        unsigned int fence_dirty:1;
  
 -      /** How many users have pinned this object in GTT space. The following
 -       * users can each hold at most one reference: pwrite/pread, pin_ioctl
 -       * (via user_pin_count), execbuffer (objects are not allowed multiple
 -       * times for the same batchbuffer), and the framebuffer code. When
 -       * switching/pageflipping, the framebuffer code has at most two buffers
 -       * pinned per crtc.
 -       *
 -       * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
 -       * bits with absolutely no headroom. So use 4 bits. */
 -      unsigned int pin_count:4;
 -#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 -
        /**
         * Is the object at the current location in the gtt mappable and
         * fenceable? Used to avoid costly recalculations.
        /** for phy allocated objects */
        struct drm_i915_gem_phys_object *phys_obj;
  };
 -#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
  
  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
  
@@@ -1838,7 -1743,6 +1838,7 @@@ struct drm_i915_gem_request 
  
  struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;
 +      struct drm_file *file;
  
        struct {
                spinlock_t lock;
        } mm;
        struct idr context_idr;
  
 -      struct i915_ctx_hang_stats hang_stats;
 +      struct i915_hw_context *private_default_ctx;
        atomic_t rps_wait_boost;
  };
  
 -#define INTEL_INFO(dev)       (to_i915(dev)->info)
 +/*
 + * A command that requires special handling by the command parser.
 + */
 +struct drm_i915_cmd_descriptor {
 +      /*
 +       * Flags describing how the command parser processes the command.
 +       *
 +       * CMD_DESC_FIXED: The command has a fixed length if this is set,
 +       *                 a length mask if not set
 +       * CMD_DESC_SKIP: The command is allowed but does not follow the
 +       *                standard length encoding for the opcode range in
 +       *                which it falls
 +       * CMD_DESC_REJECT: The command is never allowed
 +       * CMD_DESC_REGISTER: The command should be checked against the
 +       *                    register whitelist for the appropriate ring
 +       * CMD_DESC_MASTER: The command is allowed if the submitting process
 +       *                  is the DRM master
 +       */
 +      u32 flags;
 +#define CMD_DESC_FIXED    (1<<0)
 +#define CMD_DESC_SKIP     (1<<1)
 +#define CMD_DESC_REJECT   (1<<2)
 +#define CMD_DESC_REGISTER (1<<3)
 +#define CMD_DESC_BITMASK  (1<<4)
 +#define CMD_DESC_MASTER   (1<<5)
 +
 +      /*
 +       * The command's unique identification bits and the bitmask to get them.
 +       * This isn't strictly the opcode field as defined in the spec and may
 +       * also include type, subtype, and/or subop fields.
 +       */
 +      struct {
 +              u32 value;
 +              u32 mask;
 +      } cmd;
 +
 +      /*
 +       * The command's length. The command is either fixed length (i.e. does
 +       * not include a length field) or has a length field mask. The flag
 +       * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
 +       * a length mask. All command entries in a command table must include
 +       * length information.
 +       */
 +      union {
 +              u32 fixed;
 +              u32 mask;
 +      } length;
 +
 +      /*
 +       * Describes where to find a register address in the command to check
 +       * against the ring's register whitelist. Only valid if flags has the
 +       * CMD_DESC_REGISTER bit set.
 +       */
 +      struct {
 +              u32 offset;
 +              u32 mask;
 +      } reg;
 +
 +#define MAX_CMD_DESC_BITMASKS 3
 +      /*
 +       * Describes command checks where a particular dword is masked and
 +       * compared against an expected value. If the command does not match
 +       * the expected value, the parser rejects it. Only valid if flags has
 +       * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
 +       * are valid.
 +       */
 +      struct {
 +              u32 offset;
 +              u32 mask;
 +              u32 expected;
 +      } bits[MAX_CMD_DESC_BITMASKS];
 +};
 +
 +/*
 + * A table of commands requiring special handling by the command parser.
 + *
 + * Each ring has an array of tables. Each table consists of an array of command
 + * descriptors, which must be sorted with command opcodes in ascending order.
 + */
 +struct drm_i915_cmd_table {
 +      const struct drm_i915_cmd_descriptor *table;
 +      int count;
 +};
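A hypothetical table entry under the layout above; the opcode value and masks
are invented, only the shape matters:

    static const struct drm_i915_cmd_descriptor example_cmds[] = {
            {
                    .flags  = CMD_DESC_FIXED,
                    .cmd    = { .value = 0x0, .mask = 0xffff0000 },
                    .length = { .fixed = 1 },  /* one dword, no length field */
            },
    };

    static const struct drm_i915_cmd_table example_cmd_table = {
            .table = example_cmds,
            .count = ARRAY_SIZE(example_cmds),
    };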
 +
 +#define INTEL_INFO(dev)       (&to_i915(dev)->info)
  
  #define IS_I830(dev)          ((dev)->pdev->device == 0x3577)
  #define IS_845G(dev)          ((dev)->pdev->device == 0x2562)
  #define I915_NEED_GFX_HWS(dev)        (INTEL_INFO(dev)->need_gfx_hws)
  
  #define HAS_HW_CONTEXTS(dev)  (INTEL_INFO(dev)->gen >= 6)
 -#define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
 +#define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
 +#define HAS_PPGTT(dev)                (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
 +                               && !IS_BROADWELL(dev))
 +#define USES_PPGTT(dev)               intel_enable_ppgtt(dev, false)
 +#define USES_FULL_PPGTT(dev)  intel_enable_ppgtt(dev, true)
  
  #define HAS_OVERLAY(dev)              (INTEL_INFO(dev)->has_overlay)
  #define OVERLAY_NEEDS_PHYSICAL(dev)   (INTEL_INFO(dev)->overlay_needs_physical)
  
  /* Early gen2 have a totally busted CS tlb and require pinned batches. */
  #define HAS_BROKEN_CS_TLB(dev)                (IS_I830(dev) || IS_845G(dev))
+ /*
+  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
+  * even when in MSI mode. This results in spurious interrupt warnings if the
+  * legacy irq no. is shared with another device. The kernel then disables that
+  * interrupt source and so prevents the other device from working properly.
+  */
+ #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+ #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
  
  /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
   * rows, which changed the alignment requirements and fence programming.
  
  extern const struct drm_ioctl_desc i915_ioctls[];
  extern int i915_max_ioctl;
 -extern unsigned int i915_fbpercrtc __always_unused;
 -extern int i915_panel_ignore_lid __read_mostly;
 -extern unsigned int i915_powersave __read_mostly;
 -extern int i915_semaphores __read_mostly;
 -extern unsigned int i915_lvds_downclock __read_mostly;
 -extern int i915_lvds_channel_mode __read_mostly;
 -extern int i915_panel_use_ssc __read_mostly;
 -extern int i915_vbt_sdvo_panel_type __read_mostly;
 -extern int i915_enable_rc6 __read_mostly;
 -extern int i915_enable_fbc __read_mostly;
 -extern bool i915_enable_hangcheck __read_mostly;
 -extern int i915_enable_ppgtt __read_mostly;
 -extern int i915_enable_psr __read_mostly;
 -extern unsigned int i915_preliminary_hw_support __read_mostly;
 -extern int i915_disable_power_well __read_mostly;
 -extern int i915_enable_ips __read_mostly;
 -extern bool i915_fastboot __read_mostly;
 -extern int i915_enable_pc8 __read_mostly;
 -extern int i915_pc8_timeout __read_mostly;
 -extern bool i915_prefault_disable __read_mostly;
  
  extern int i915_suspend(struct drm_device *dev, pm_message_t state);
  extern int i915_resume(struct drm_device *dev);
  extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
  extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
  
 +/* i915_params.c */
 +struct i915_params {
 +      int modeset;
 +      int panel_ignore_lid;
 +      unsigned int powersave;
 +      int semaphores;
 +      unsigned int lvds_downclock;
 +      int lvds_channel_mode;
 +      int panel_use_ssc;
 +      int vbt_sdvo_panel_type;
 +      int enable_rc6;
 +      int enable_fbc;
 +      int enable_ppgtt;
 +      int enable_psr;
 +      unsigned int preliminary_hw_support;
 +      int disable_power_well;
 +      int enable_ips;
 +      int enable_pc8;
 +      int pc8_timeout;
 +      int invert_brightness;
 +      int enable_cmd_parser;
 +      /* leave bools at the end to not create holes */
 +      bool enable_hangcheck;
 +      bool fastboot;
 +      bool prefault_disable;
 +      bool reset;
 +      bool disable_display;
 +};
 +extern struct i915_params i915 __read_mostly;
 +
                                /* i915_dma.c */
  void i915_update_dri1_breadcrumb(struct drm_device *dev);
  extern void i915_kernel_lost_context(struct drm_device * dev);
@@@ -2129,12 -1943,8 +2137,12 @@@ extern void intel_console_resume(struc
  
  /* i915_irq.c */
  void i915_queue_hangcheck(struct drm_device *dev);
 -void i915_handle_error(struct drm_device *dev, bool wedged);
 +__printf(3, 4)
 +void i915_handle_error(struct drm_device *dev, bool wedged,
 +                     const char *fmt, ...);
  
 +void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
 +                                                      int new_delay);
  extern void intel_irq_init(struct drm_device *dev);
  extern void intel_hpd_init(struct drm_device *dev);
  
@@@ -2145,15 -1955,10 +2153,15 @@@ extern void intel_uncore_check_errors(s
  extern void intel_uncore_fini(struct drm_device *dev);
  
  void
 -i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 +i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
 +                   u32 status_mask);
  
  void
 -i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 +i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
 +                    u32 status_mask);
 +
 +void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
 +void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
  
  /* i915_gem.c */
  int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@@ -2209,27 -2014,22 +2217,27 @@@ void i915_gem_object_init(struct drm_i9
                         const struct drm_i915_gem_object_ops *ops);
  struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 +void i915_init_vm(struct drm_i915_private *dev_priv,
 +                struct i915_address_space *vm);
  void i915_gem_free_object(struct drm_gem_object *obj);
  void i915_gem_vma_destroy(struct i915_vma *vma);
  
 +#define PIN_MAPPABLE 0x1
 +#define PIN_NONBLOCK 0x2
 +#define PIN_GLOBAL 0x4
  int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm,
                                     uint32_t alignment,
 -                                   bool map_and_fenceable,
 -                                   bool nonblocking);
 -void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 +                                   unsigned flags);
  int __must_check i915_vma_unbind(struct i915_vma *vma);
 -int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
  int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
  void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
  void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
  void i915_gem_lastclose(struct drm_device *dev);
  
 +int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 +                                  int *needs_clflush);
 +
  int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
  static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
  {
@@@ -2296,10 -2096,8 +2304,10 @@@ i915_gem_object_unpin_fence(struct drm_
        }
  }
  
 +struct drm_i915_gem_request *
 +i915_gem_find_active_request(struct intel_ring_buffer *ring);
 +
  bool i915_gem_retire_requests(struct drm_device *dev);
 -void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
  int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
  static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
@@@ -2388,13 -2186,6 +2396,13 @@@ i915_gem_obj_lookup_or_create_vma(struc
                                  struct i915_address_space *vm);
  
  struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
 +static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
 +      struct i915_vma *vma;
 +      list_for_each_entry(vma, &obj->vma_list, vma_link)
 +              if (vma->pin_count > 0)
 +                      return true;
 +      return false;
 +}
  
  /* Some GGTT VM helpers */
  #define obj_to_ggtt(obj) \
@@@ -2426,69 -2217,54 +2434,69 @@@ i915_gem_obj_ggtt_size(struct drm_i915_
  static inline int __must_check
  i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                      uint32_t alignment,
 -                    bool map_and_fenceable,
 -                    bool nonblocking)
 +                    unsigned flags)
 +{
 +      return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
 +}
 +
 +static inline int
 +i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
  {
 -      return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
 -                                 map_and_fenceable, nonblocking);
 +      return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
  }
  
 +void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 +
  /* i915_gem_context.c */
 +#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
  int __must_check i915_gem_context_init(struct drm_device *dev);
  void i915_gem_context_fini(struct drm_device *dev);
 +void i915_gem_context_reset(struct drm_device *dev);
 +int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 +int i915_gem_context_enable(struct drm_i915_private *dev_priv);
  void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
  int i915_switch_context(struct intel_ring_buffer *ring,
 -                      struct drm_file *file, int to_id);
 +                      struct drm_file *file, struct i915_hw_context *to);
 +struct i915_hw_context *
 +i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
  void i915_gem_context_free(struct kref *ctx_ref);
  static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
  {
 -      kref_get(&ctx->ref);
 +      if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
 +              kref_get(&ctx->ref);
  }
  
  static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
  {
 -      kref_put(&ctx->ref, i915_gem_context_free);
 +      if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
 +              kref_put(&ctx->ref, i915_gem_context_free);
 +}
 +
 +static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
 +{
 +      return c->id == DEFAULT_CONTEXT_ID;
  }
  
 -struct i915_ctx_hang_stats * __must_check
 -i915_gem_context_get_hang_stats(struct drm_device *dev,
 -                              struct drm_file *file,
 -                              u32 id);
  int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
  int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);
  
 -/* i915_gem_gtt.c */
 -void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
 -void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 -                          struct drm_i915_gem_object *obj,
 -                          enum i915_cache_level cache_level);
 -void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 -                            struct drm_i915_gem_object *obj);
 +/* i915_gem_evict.c */
 +int __must_check i915_gem_evict_something(struct drm_device *dev,
 +                                        struct i915_address_space *vm,
 +                                        int min_size,
 +                                        unsigned alignment,
 +                                        unsigned cache_level,
 +                                        unsigned flags);
 +int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 +int i915_gem_evict_everything(struct drm_device *dev);
  
 +/* i915_gem_gtt.c */
  void i915_check_and_clear_faults(struct drm_device *dev);
  void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
  void i915_gem_restore_gtt_mappings(struct drm_device *dev);
  int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 -void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 -                              enum i915_cache_level cache_level);
 -void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
  void i915_gem_init_global_gtt(struct drm_device *dev);
  void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@@ -2499,8 -2275,18 +2507,8 @@@ static inline void i915_gem_chipset_flu
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
  }
 -
 -
 -/* i915_gem_evict.c */
 -int __must_check i915_gem_evict_something(struct drm_device *dev,
 -                                        struct i915_address_space *vm,
 -                                        int min_size,
 -                                        unsigned alignment,
 -                                        unsigned cache_level,
 -                                        bool mappable,
 -                                        bool nonblock);
 -int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 -int i915_gem_evict_everything(struct drm_device *dev);
 +int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 +bool intel_enable_ppgtt(struct drm_device *dev, bool full);
  
  /* i915_gem_stolen.c */
  int i915_gem_init_stolen(struct drm_device *dev);
@@@ -2557,8 -2343,7 +2565,8 @@@ static inline void i915_error_state_buf
  {
        kfree(eb->buf);
  }
 -void i915_capture_error_state(struct drm_device *dev);
 +void i915_capture_error_state(struct drm_device *dev, bool wedge,
 +                            const char *error_msg);
  void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv);
  void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
@@@ -2567,14 -2352,6 +2575,14 @@@ void i915_destroy_error_state(struct dr
  void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
  const char *i915_cache_level_str(int type);
  
 +/* i915_cmd_parser.c */
 +void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
 +bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
 +int i915_parse_cmds(struct intel_ring_buffer *ring,
 +                  struct drm_i915_gem_object *batch_obj,
 +                  u32 batch_start_offset,
 +                  bool is_master);
 +
  /* i915_suspend.c */
  extern int i915_save_state(struct drm_device *dev);
  extern int i915_restore_state(struct drm_device *dev);
@@@ -2607,8 -2384,8 +2615,8 @@@ extern void intel_i2c_reset(struct drm_
  
  /* intel_opregion.c */
  struct intel_encoder;
- extern int intel_opregion_setup(struct drm_device *dev);
  #ifdef CONFIG_ACPI
+ extern int intel_opregion_setup(struct drm_device *dev);
  extern void intel_opregion_init(struct drm_device *dev);
  extern void intel_opregion_fini(struct drm_device *dev);
  extern void intel_opregion_asle_intr(struct drm_device *dev);
@@@ -2617,6 -2394,7 +2625,7 @@@ extern int intel_opregion_notify_encode
  extern int intel_opregion_notify_adapter(struct drm_device *dev,
                                         pci_power_t state);
  #else
+ static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
  static inline void intel_opregion_init(struct drm_device *dev) { return; }
  static inline void intel_opregion_fini(struct drm_device *dev) { return; }
  static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
@@@ -2647,12 -2425,10 +2656,12 @@@ extern void intel_modeset_suspend_hw(st
  extern void intel_modeset_init(struct drm_device *dev);
  extern void intel_modeset_gem_init(struct drm_device *dev);
  extern void intel_modeset_cleanup(struct drm_device *dev);
 +extern void intel_connector_unregister(struct intel_connector *);
  extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
  extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
  extern void i915_redisable_vga(struct drm_device *dev);
 +extern void i915_redisable_vga_power_on(struct drm_device *dev);
  extern bool intel_fbc_enabled(struct drm_device *dev);
  extern void intel_disable_fbc(struct drm_device *dev);
  extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@@ -2687,7 -2463,6 +2696,7 @@@ extern void intel_display_print_error_s
   */
  void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
  void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 +void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
  
  int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
  int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@@ -2791,31 -2566,4 +2800,31 @@@ timespec_to_jiffies_timeout(const struc
        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
  }
  
 +/*
 + * If you need to wait X milliseconds between events A and B, but event B
 + * doesn't happen exactly after event A, record the timestamp (jiffies) of
 + * when event A happened. Then, just before event B, call this function with
 + * that timestamp as the first argument and X as the second argument.
 + */
 +static inline void
 +wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 +{
 +      unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
 +
 +      /*
 +       * Don't re-read the value of "jiffies" every time since it may change
 +       * behind our back and break the math.
 +       */
 +      tmp_jiffies = jiffies;
 +      target_jiffies = timestamp_jiffies +
 +                       msecs_to_jiffies_timeout(to_wait_ms);
 +
 +      if (time_after(target_jiffies, tmp_jiffies)) {
 +              remaining_jiffies = target_jiffies - tmp_jiffies;
 +              while (remaining_jiffies)
 +                      remaining_jiffies =
 +                          schedule_timeout_uninterruptible(remaining_jiffies);
 +      }
 +}
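 +
 +/*
 + * A minimal usage sketch (illustrative only; the names and the 500ms figure
 + * are made up):
 + *
 + *     unsigned long panel_off = jiffies;                 (event A happens)
 + *     ...
 + *     wait_remaining_ms_from_jiffies(panel_off, 500);
 + *     (event B may proceed here, at least 500ms after event A)
 + */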
 +
  #endif
@@@ -1,6 -1,5 +1,6 @@@
  /*
   * Copyright © 2010 Daniel Vetter
 + * Copyright © 2011-2014 Intel Corporation
   *
   * Permission is hereby granted, free of charge, to any person obtaining a
   * copy of this software and associated documentation files (the "Software"),
   *
   */
  
 +#include <linux/seq_file.h>
  #include <drm/drmP.h>
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
  
 +bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 +{
 +      if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
 +              return false;
 +
 +      if (i915.enable_ppgtt == 1 && full)
 +              return false;
 +
 +#ifdef CONFIG_INTEL_IOMMU
 +      /* Disable ppgtt on SNB if VT-d is on. */
 +      if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
 +              DRM_INFO("Disabling PPGTT because VT-d is on\n");
 +              return false;
 +      }
 +#endif
 +
 +      /* Full ppgtt disabled by default for now due to issues. */
 +      if (full)
 +              return false; /* HAS_PPGTT(dev) */
 +      else
 +              return HAS_ALIASING_PPGTT(dev);
 +}
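 +
 +/*
 + * Summarizing the checks above: i915.enable_ppgtt == 0 disables PPGTT
 + * entirely, == 1 permits only aliasing PPGTT, and full PPGTT stays off
 + * for now in any case; on gen6 (SNB) PPGTT is also disabled when VT-d
 + * is active.
 + */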
 +
  #define GEN6_PPGTT_PD_ENTRIES 512
  #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
  typedef uint64_t gen8_gtt_pte_t;
@@@ -88,31 -63,13 +88,31 @@@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t
  
  #define GEN8_PTES_PER_PAGE            (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
  #define GEN8_PDES_PER_PAGE            (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
 -#define GEN8_LEGACY_PDPS              4
 +
 +/* GEN8 legacy style address is defined as a 3 level page table:
 + * 31:30 | 29:21 | 20:12 |  11:0
 + * PDPE  |  PDE  |  PTE  | offset
 + * The difference compared to a normal x86 3 level page table is that the
 + * PDPEs are programmed via register.
 + */
 +#define GEN8_PDPE_SHIFT                       30
 +#define GEN8_PDPE_MASK                        0x3
 +#define GEN8_PDE_SHIFT                        21
 +#define GEN8_PDE_MASK                 0x1ff
 +#define GEN8_PTE_SHIFT                        12
 +#define GEN8_PTE_MASK                 0x1ff
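 +
 +/*
 + * A worked example, assuming the shifts/masks above (the address itself is
 + * made up): for a GPU virtual address of 0x40302000,
 + *   pdpe = (0x40302000 >> 30) & 0x3   = 1
 + *   pde  = (0x40302000 >> 21) & 0x1ff = 1
 + *   pte  = (0x40302000 >> 12) & 0x1ff = 0x102
 + * i.e. PDP 1, page directory entry 1, page table entry 0x102, page offset 0.
 + */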
  
  #define PPAT_UNCACHED_INDEX           (_PAGE_PWT | _PAGE_PCD)
  #define PPAT_CACHED_PDE_INDEX         0 /* WB LLC */
  #define PPAT_CACHED_INDEX             _PAGE_PAT /* WB LLCeLLC */
  #define PPAT_DISPLAY_ELLC_INDEX               _PAGE_PCD /* WT eLLC */
  
 +static void ppgtt_bind_vma(struct i915_vma *vma,
 +                         enum i915_cache_level cache_level,
 +                         u32 flags);
 +static void ppgtt_unbind_vma(struct i915_vma *vma);
 +static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 +
  static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                                             enum i915_cache_level level,
                                             bool valid)
@@@ -242,19 -199,12 +242,19 @@@ static gen6_gtt_pte_t iris_pte_encode(d
  
  /* Broadwell Page Directory Pointer Descriptors */
  static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
 -                         uint64_t val)
 +                         uint64_t val, bool synchronous)
  {
 +      struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int ret;
  
        BUG_ON(entry >= 4);
  
 +      if (synchronous) {
 +              I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
 +              I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
 +              return 0;
 +      }
 +
        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;
        return 0;
  }
  
 -static int gen8_ppgtt_enable(struct drm_device *dev)
 +static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 +                        struct intel_ring_buffer *ring,
 +                        bool synchronous)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_ring_buffer *ring;
 -      struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 -      int i, j, ret;
 +      int i, ret;
  
        /* bit of a hack to find the actual last used pd */
        int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
  
 -      for_each_ring(ring, dev_priv, j) {
 -              I915_WRITE(RING_MODE_GEN7(ring),
 -                         _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 -      }
 -
        for (i = used_pd - 1; i >= 0; i--) {
                dma_addr_t addr = ppgtt->pd_dma_addr[i];
 -              for_each_ring(ring, dev_priv, j) {
 -                      ret = gen8_write_pdp(ring, i, addr);
 -                      if (ret)
 -                              goto err_out;
 -              }
 +              ret = gen8_write_pdp(ring, i, addr, synchronous);
 +              if (ret)
 +                      return ret;
        }
 -      return 0;
  
 -err_out:
 -      for_each_ring(ring, dev_priv, j)
 -              I915_WRITE(RING_MODE_GEN7(ring),
 -                         _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
 -      return ret;
 +      return 0;
  }
  
  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 -                                 unsigned first_entry,
 -                                 unsigned num_entries,
 +                                 uint64_t start,
 +                                 uint64_t length,
                                   bool use_scratch)
  {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_gtt_pte_t *pt_vaddr, scratch_pte;
 -      unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
 -      unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
 +      unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 +      unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 +      unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
 +      unsigned num_entries = length >> PAGE_SHIFT;
        unsigned last_pte, i;
  
        scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
                                      I915_CACHE_LLC, use_scratch);
  
        while (num_entries) {
 -              struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
 +              struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
  
 -              last_pte = first_pte + num_entries;
 +              last_pte = pte + num_entries;
                if (last_pte > GEN8_PTES_PER_PAGE)
                        last_pte = GEN8_PTES_PER_PAGE;
  
                pt_vaddr = kmap_atomic(page_table);
  
 -              for (i = first_pte; i < last_pte; i++)
 +              for (i = pte; i < last_pte; i++) {
                        pt_vaddr[i] = scratch_pte;
 +                      num_entries--;
 +              }
  
                kunmap_atomic(pt_vaddr);
  
 -              num_entries -= last_pte - first_pte;
 -              first_pte = 0;
 -              act_pt++;
 +              pte = 0;
 +              if (++pde == GEN8_PDES_PER_PAGE) {
 +                      pdpe++;
 +                      pde = 0;
 +              }
        }
  }
  
  static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
 -                                    unsigned first_entry,
 +                                    uint64_t start,
                                      enum i915_cache_level cache_level)
  {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_gtt_pte_t *pt_vaddr;
 -      unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
 -      unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
 +      unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 +      unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 +      unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
        struct sg_page_iter sg_iter;
  
        pt_vaddr = NULL;
 +
        for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 +              if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
 +                      break;
 +
                if (pt_vaddr == NULL)
 -                      pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
 +                      pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
  
 -              pt_vaddr[act_pte] =
 +              pt_vaddr[pte] =
                        gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
                                        cache_level, true);
 -              if (++act_pte == GEN8_PTES_PER_PAGE) {
 +              if (++pte == GEN8_PTES_PER_PAGE) {
                        kunmap_atomic(pt_vaddr);
                        pt_vaddr = NULL;
 -                      act_pt++;
 -                      act_pte = 0;
 +                      if (++pde == GEN8_PDES_PER_PAGE) {
 +                              pdpe++;
 +                              pde = 0;
 +                      }
 +                      pte = 0;
                }
        }
        if (pt_vaddr)
                kunmap_atomic(pt_vaddr);
  }
  
 +static void gen8_free_page_tables(struct page **pt_pages)
 +{
 +      int i;
 +
 +      if (pt_pages == NULL)
 +              return;
 +
 +      for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
 +              if (pt_pages[i])
 +                      __free_pages(pt_pages[i], 0);
 +}
 +
 +static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
 +{
 +      int i;
 +
 +      for (i = 0; i < ppgtt->num_pd_pages; i++) {
 +              gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
 +              kfree(ppgtt->gen8_pt_pages[i]);
 +              kfree(ppgtt->gen8_pt_dma_addr[i]);
 +      }
 +
 +      __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 +}
 +
 +static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 +{
 +      struct pci_dev *hwdev = ppgtt->base.dev->pdev;
 +      int i, j;
 +
 +      for (i = 0; i < ppgtt->num_pd_pages; i++) {
 +              /* TODO: In the future we'll support sparse mappings, so this
 +               * will have to change. */
 +              if (!ppgtt->pd_dma_addr[i])
 +                      continue;
 +
 +              pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
 +                             PCI_DMA_BIDIRECTIONAL);
 +
 +              for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 +                      dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
 +                      if (addr)
 +                              pci_unmap_page(hwdev, addr, PAGE_SIZE,
 +                                             PCI_DMA_BIDIRECTIONAL);
 +              }
 +      }
 +}
 +
  static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
  {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
 -      int i, j;
  
 +      list_del(&vm->global_link);
        drm_mm_takedown(&vm->mm);
  
 -      for (i = 0; i < ppgtt->num_pd_pages ; i++) {
 -              if (ppgtt->pd_dma_addr[i]) {
 -                      pci_unmap_page(ppgtt->base.dev->pdev,
 -                                     ppgtt->pd_dma_addr[i],
 -                                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 +      gen8_ppgtt_unmap_pages(ppgtt);
 +      gen8_ppgtt_free(ppgtt);
 +}
 +
 +static struct page **__gen8_alloc_page_tables(void)
 +{
 +      struct page **pt_pages;
 +      int i;
  
 -                      for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 -                              dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
 -                              if (addr)
 -                                      pci_unmap_page(ppgtt->base.dev->pdev,
 -                                                     addr,
 -                                                     PAGE_SIZE,
 -                                                     PCI_DMA_BIDIRECTIONAL);
 +      pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
 +      if (!pt_pages)
 +              return ERR_PTR(-ENOMEM);
  
 -                      }
 -              }
 -              kfree(ppgtt->gen8_pt_dma_addr[i]);
 +      for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
 +              pt_pages[i] = alloc_page(GFP_KERNEL);
 +              if (!pt_pages[i])
 +                      goto bail;
        }
  
 -      __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
 -      __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 +      return pt_pages;
 +
 +bail:
 +      gen8_free_page_tables(pt_pages);
 +      kfree(pt_pages);
 +      return ERR_PTR(-ENOMEM);
  }
  
 -/**
 - * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
 - * net effect resembling a 2-level page table in normal x86 terms. Each PDP
 - * represents 1GB of memory
 - * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
 - *
 - * TODO: Do something with the size parameter
 - **/
 -static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 +static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
 +                                         const int max_pdp)
  {
 -      struct page *pt_pages;
 -      int i, j, ret = -ENOMEM;
 -      const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
 -      const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
 +      struct page **pt_pages[GEN8_LEGACY_PDPS];
 +      int i, ret;
  
 -      if (size % (1<<30))
 -              DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
 +      for (i = 0; i < max_pdp; i++) {
 +              pt_pages[i] = __gen8_alloc_page_tables();
 +              if (IS_ERR(pt_pages[i])) {
 +                      ret = PTR_ERR(pt_pages[i]);
 +                      goto unwind_out;
 +              }
 +      }
  
 -      /* FIXME: split allocation into smaller pieces. For now we only ever do
 -       * this once, but with full PPGTT, the multiple contiguous allocations
 -       * will be bad.
 +      /* NB: Avoid touching gen8_pt_pages until last to keep the
 +       * allocation "atomic" for cleanup purposes.
         */
 +      for (i = 0; i < max_pdp; i++)
 +              ppgtt->gen8_pt_pages[i] = pt_pages[i];
 +
 +      return 0;
 +
 +unwind_out:
 +      while (i--) {
 +              gen8_free_page_tables(pt_pages[i]);
 +              kfree(pt_pages[i]);
 +      }
 +
 +      return ret;
 +}
 +
 +static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
 +{
 +      int i;
 +
 +      for (i = 0; i < ppgtt->num_pd_pages; i++) {
 +              ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
 +                                                   sizeof(dma_addr_t),
 +                                                   GFP_KERNEL);
 +              if (!ppgtt->gen8_pt_dma_addr[i])
 +                      return -ENOMEM;
 +      }
 +
 +      return 0;
 +}
 +
 +static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
 +                                              const int max_pdp)
 +{
        ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
        if (!ppgtt->pd_pages)
                return -ENOMEM;
  
 -      pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
 -      if (!pt_pages) {
 +      ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
 +      BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 +
 +      return 0;
 +}
 +
 +static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 +                          const int max_pdp)
 +{
 +      int ret;
 +
 +      ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
 +      if (ret)
 +              return ret;
 +
 +      ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
 +      if (ret) {
                __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
 -              return -ENOMEM;
 +              return ret;
        }
  
 -      ppgtt->gen8_pt_pages = pt_pages;
 -      ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
 -      ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
        ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
 -      ppgtt->enable = gen8_ppgtt_enable;
 -      ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 -      ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 -      ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 -      ppgtt->base.start = 0;
 -      ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
  
 -      BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 +      ret = gen8_ppgtt_allocate_dma(ppgtt);
 +      if (ret)
 +              gen8_ppgtt_free(ppgtt);
  
 -      /*
 -       * - Create a mapping for the page directories.
 -       * - For each page directory:
 -       *      allocate space for page table mappings.
 -       *      map each page table
 -       */
 -      for (i = 0; i < max_pdp; i++) {
 -              dma_addr_t temp;
 -              temp = pci_map_page(ppgtt->base.dev->pdev,
 -                                  &ppgtt->pd_pages[i], 0,
 -                                  PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 -              if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
 -                      goto err_out;
 +      return ret;
 +}
  
 -              ppgtt->pd_dma_addr[i] = temp;
 +static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
 +                                           const int pd)
 +{
 +      dma_addr_t pd_addr;
 +      int ret;
  
 -              ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
 -              if (!ppgtt->gen8_pt_dma_addr[i])
 -                      goto err_out;
 +      pd_addr = pci_map_page(ppgtt->base.dev->pdev,
 +                             &ppgtt->pd_pages[pd], 0,
 +                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
  
 -              for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 -                      struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
 -                      temp = pci_map_page(ppgtt->base.dev->pdev,
 -                                          p, 0, PAGE_SIZE,
 -                                          PCI_DMA_BIDIRECTIONAL);
 +      ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
 +      if (ret)
 +              return ret;
 +
 +      ppgtt->pd_dma_addr[pd] = pd_addr;
  
 -                      if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
 -                              goto err_out;
 +      return 0;
 +}
  
 -                      ppgtt->gen8_pt_dma_addr[i][j] = temp;
 +static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
 +                                      const int pd,
 +                                      const int pt)
 +{
 +      dma_addr_t pt_addr;
 +      struct page *p;
 +      int ret;
 +
 +      p = ppgtt->gen8_pt_pages[pd][pt];
 +      pt_addr = pci_map_page(ppgtt->base.dev->pdev,
 +                             p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 +      ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
 +      if (ret)
 +              return ret;
 +
 +      ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
 +
 +      return 0;
 +}
 +
 +/**
 + * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP
 + * registers, with a net effect resembling a 2-level page table in normal x86
 + * terms. Each PDP represents 1GB of memory:
 + * 4 * 512 * 512 * 4096 = 4GB of legacy 32b address space.
 + *
 + * FIXME: split allocation into smaller pieces. For now we only ever do this
 + * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 + * TODO: Do something with the size parameter
 + */
 +static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 +{
 +      const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
 +      const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
 +      int i, j, ret;
 +
 +      if (size % (1<<30))
 +              DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
 +
 +      /* 1. Do all our allocations for page directories and page tables. */
 +      ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
 +      if (ret)
 +              return ret;
 +
 +      /*
 +       * 2. Create DMA mappings for the page directories and page tables.
 +       */
 +      for (i = 0; i < max_pdp; i++) {
 +              ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
 +              if (ret)
 +                      goto bail;
 +
 +              for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
 +                      ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
 +                      if (ret)
 +                              goto bail;
                }
        }
  
 -      /* For now, the PPGTT helper functions all require that the PDEs are
 +      /*
 +       * 3. Map all the page directory entries to point to the page tables
 +       * we've allocated.
 +       *
 +       * For now, the PPGTT helper functions all require that the PDEs are
         * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
 -       * will never need to touch the PDEs again */
 +       * will never need to touch the PDEs again.
 +       */
        for (i = 0; i < max_pdp; i++) {
                gen8_ppgtt_pde_t *pd_vaddr;
                pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
                kunmap_atomic(pd_vaddr);
        }
  
 -      ppgtt->base.clear_range(&ppgtt->base, 0,
 -                              ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
 -                              true);
 +      ppgtt->enable = gen8_ppgtt_enable;
 +      ppgtt->switch_mm = gen8_mm_switch;
 +      ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 +      ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 +      ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 +      ppgtt->base.start = 0;
 +      ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 +
 +      ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
  
        DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
                         ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
        DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
 -                       ppgtt->num_pt_pages,
 -                       (ppgtt->num_pt_pages - num_pt_pages) +
 -                       size % (1<<30));
 +                       ppgtt->num_pd_entries,
 +                       (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
        return 0;
  
 -err_out:
 -      ppgtt->base.cleanup(&ppgtt->base);
 +bail:
 +      gen8_ppgtt_unmap_pages(ppgtt);
 +      gen8_ppgtt_free(ppgtt);
        return ret;
  }
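 +
 +/*
 + * A worked sizing sketch for gen8_ppgtt_init above (illustrative): with
 + * size = 4GB, max_pdp = DIV_ROUND_UP(4GB, 1GB) = 4, so 4 * 512 = 2048
 + * page tables get allocated and num_pd_entries = 2048, covering
 + * 4 * 512 * 512 * 4096 bytes = 4GB of address space.
 + */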
  
 +static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 +{
 +      struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
 +      struct i915_address_space *vm = &ppgtt->base;
 +      gen6_gtt_pte_t __iomem *pd_addr;
 +      gen6_gtt_pte_t scratch_pte;
 +      uint32_t pd_entry;
 +      int pte, pde;
 +
 +      scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 +
 +      pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
 +              ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
 +
 +      seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
 +                 ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
 +      for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
 +              u32 expected;
 +              gen6_gtt_pte_t *pt_vaddr;
 +              dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
 +              pd_entry = readl(pd_addr + pde);
 +              expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 +
 +              if (pd_entry != expected)
 +                      seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
 +                                 pde,
 +                                 pd_entry,
 +                                 expected);
 +              seq_printf(m, "\tPDE: %x\n", pd_entry);
 +
 +              pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
 +              for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte += 4) {
 +                      unsigned long va =
 +                              (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
 +                              (pte * PAGE_SIZE);
 +                      int i;
 +                      bool found = false;
 +                      for (i = 0; i < 4; i++)
 +                              if (pt_vaddr[pte + i] != scratch_pte)
 +                                      found = true;
 +                      if (!found)
 +                              continue;
 +
 +                      seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
 +                      for (i = 0; i < 4; i++) {
 +                              if (pt_vaddr[pte + i] != scratch_pte)
 +                                      seq_printf(m, " %08x", pt_vaddr[pte + i]);
 +                              else
 +                                      seq_puts(m, "  SCRATCH ");
 +                      }
 +                      seq_puts(m, "\n");
 +              }
 +              kunmap_atomic(pt_vaddr);
 +      }
 +}
 +
  static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
  {
        struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
        readl(pd_addr);
  }
  
 -static int gen6_ppgtt_enable(struct drm_device *dev)
 +static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      uint32_t pd_offset;
 +      BUG_ON(ppgtt->pd_offset & 0x3f);
 +
 +      return (ppgtt->pd_offset / 64) << 16;
 +}
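 +
 +/*
 + * A minimal sketch of the encoding above (example value made up): pd_offset
 + * is in bytes and 64-byte (cacheline) aligned, so e.g. pd_offset = 0x40000
 + * yields (0x40000 / 64) << 16 = 0x10000000, the value written to
 + * RING_PP_DIR_BASE by the switch_mm implementations below.
 + */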
 +
 +static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 +                       struct intel_ring_buffer *ring,
 +                       bool synchronous)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int ret;
 +
 +      /* If we're in reset, we can assume the GPU is sufficiently idle to
 +       * manually frob these bits. Ideally we could use the ring functions,
 +       * except our error handling makes it quite difficult (can't use
 +       * intel_ring_begin, ring->flush, or intel_ring_advance)
 +       *
 +       * FIXME: We should try not to special case reset
 +       */
 +      if (synchronous ||
 +          i915_reset_in_progress(&dev_priv->gpu_error)) {
 +              WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
 +              I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 +              I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 +              POSTING_READ(RING_PP_DIR_BASE(ring));
 +              return 0;
 +      }
 +
 +      /* NB: TLBs must be flushed and invalidated before a switch */
 +      ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 +      if (ret)
 +              return ret;
 +
 +      ret = intel_ring_begin(ring, 6);
 +      if (ret)
 +              return ret;
 +
 +      intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
 +      intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
 +      intel_ring_emit(ring, PP_DIR_DCLV_2G);
 +      intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
 +      intel_ring_emit(ring, get_pd_offset(ppgtt));
 +      intel_ring_emit(ring, MI_NOOP);
 +      intel_ring_advance(ring);
 +
 +      return 0;
 +}
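 +
 +/*
 + * Of the 6 dwords reserved above: one MI_LOAD_REGISTER_IMM(2) header, two
 + * register/value pairs (PP_DIR_DCLV and PP_DIR_BASE), and a trailing
 + * MI_NOOP padding the emission to an even dword count.
 + */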
 +
 +static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 +                        struct intel_ring_buffer *ring,
 +                        bool synchronous)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int ret;
 +
 +      /* If we're in reset, we can assume the GPU is sufficiently idle to
 +       * manually frob these bits. Ideally we could use the ring functions,
 +       * except our error handling makes it quite difficult (can't use
 +       * intel_ring_begin, ring->flush, or intel_ring_advance)
 +       *
 +       * FIXME: We should try not to special case reset
 +       */
 +      if (synchronous ||
 +          i915_reset_in_progress(&dev_priv->gpu_error)) {
 +              WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
 +              I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 +              I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 +              POSTING_READ(RING_PP_DIR_BASE(ring));
 +              return 0;
 +      }
 +
 +      /* NB: TLBs must be flushed and invalidated before a switch */
 +      ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 +      if (ret)
 +              return ret;
 +
 +      ret = intel_ring_begin(ring, 6);
 +      if (ret)
 +              return ret;
 +
 +      intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
 +      intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
 +      intel_ring_emit(ring, PP_DIR_DCLV_2G);
 +      intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
 +      intel_ring_emit(ring, get_pd_offset(ppgtt));
 +      intel_ring_emit(ring, MI_NOOP);
 +      intel_ring_advance(ring);
 +
 +      /* XXX: RCS is the only one to auto invalidate the TLBs? */
 +      if (ring->id != RCS) {
 +              ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      return 0;
 +}
 +
 +static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
 +                        struct intel_ring_buffer *ring,
 +                        bool synchronous)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      if (!synchronous)
 +              return 0;
 +
 +      I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 +      I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
 +
 +      POSTING_READ(RING_PP_DIR_DCLV(ring));
 +
 +      return 0;
 +}
 +
 +static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
 -      struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 -      int i;
 +      int j, ret;
  
 -      BUG_ON(ppgtt->pd_offset & 0x3f);
 +      for_each_ring(ring, dev_priv, j) {
 +              I915_WRITE(RING_MODE_GEN7(ring),
 +                         _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
  
 -      gen6_write_pdes(ppgtt);
 +              /* We promise to do a switch later with FULL PPGTT. If this is
 +               * aliasing, this is the one and only switch we'll do */
 +              if (USES_FULL_PPGTT(dev))
 +                      continue;
  
 -      pd_offset = ppgtt->pd_offset;
 -      pd_offset /= 64; /* in cachelines, */
 -      pd_offset <<= 16;
 +              ret = ppgtt->switch_mm(ppgtt, ring, true);
 +              if (ret)
 +                      goto err_out;
 +      }
  
 -      if (INTEL_INFO(dev)->gen == 6) {
 -              uint32_t ecochk, gab_ctl, ecobits;
 +      return 0;
  
 -              ecobits = I915_READ(GAC_ECO_BITS);
 -              I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
 -                                       ECOBITS_PPGTT_CACHE64B);
 +err_out:
 +      for_each_ring(ring, dev_priv, j)
 +              I915_WRITE(RING_MODE_GEN7(ring),
 +                         _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
 +      return ret;
 +}
  
 -              gab_ctl = I915_READ(GAB_CTL);
 -              I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 +static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct intel_ring_buffer *ring;
 +      uint32_t ecochk, ecobits;
 +      int i;
  
 -              ecochk = I915_READ(GAM_ECOCHK);
 -              I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
 -                                     ECOCHK_PPGTT_CACHE64B);
 -              I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 -      } else if (INTEL_INFO(dev)->gen >= 7) {
 -              uint32_t ecochk, ecobits;
 +      ecobits = I915_READ(GAC_ECO_BITS);
 +      I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
  
 -              ecobits = I915_READ(GAC_ECO_BITS);
 -              I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 +      ecochk = I915_READ(GAM_ECOCHK);
 +      if (IS_HASWELL(dev)) {
 +              ecochk |= ECOCHK_PPGTT_WB_HSW;
 +      } else {
 +              ecochk |= ECOCHK_PPGTT_LLC_IVB;
 +              ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
 +      }
 +      I915_WRITE(GAM_ECOCHK, ecochk);
  
 -              ecochk = I915_READ(GAM_ECOCHK);
 -              if (IS_HASWELL(dev)) {
 -                      ecochk |= ECOCHK_PPGTT_WB_HSW;
 -              } else {
 -                      ecochk |= ECOCHK_PPGTT_LLC_IVB;
 -                      ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
 -              }
 -              I915_WRITE(GAM_ECOCHK, ecochk);
 +      for_each_ring(ring, dev_priv, i) {
 +              int ret;
                /* GFX_MODE is per-ring on gen7+ */
 +              I915_WRITE(RING_MODE_GEN7(ring),
 +                         _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 +
 +              /* We promise to do a switch later with FULL PPGTT. If this is
 +               * aliasing, this is the one and only switch we'll do */
 +              if (USES_FULL_PPGTT(dev))
 +                      continue;
 +
 +              ret = ppgtt->switch_mm(ppgtt, ring, true);
 +              if (ret)
 +                      return ret;
        }
  
 -      for_each_ring(ring, dev_priv, i) {
 -              if (INTEL_INFO(dev)->gen >= 7)
 -                      I915_WRITE(RING_MODE_GEN7(ring),
 -                                 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 +      return 0;
 +}
  
 -              I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
 -              I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
 +static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct intel_ring_buffer *ring;
 +      uint32_t ecochk, gab_ctl, ecobits;
 +      int i;
 +
 +      ecobits = I915_READ(GAC_ECO_BITS);
 +      I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
 +                 ECOBITS_PPGTT_CACHE64B);
 +
 +      gab_ctl = I915_READ(GAB_CTL);
 +      I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 +
 +      ecochk = I915_READ(GAM_ECOCHK);
 +      I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 +
 +      I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 +
 +      for_each_ring(ring, dev_priv, i) {
 +              int ret = ppgtt->switch_mm(ppgtt, ring, true);
 +              if (ret)
 +                      return ret;
        }
 +
        return 0;
  }
  
  /* PPGTT support for Sandybridge/Gen6 and later */
  static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 -                                 unsigned first_entry,
 -                                 unsigned num_entries,
 +                                 uint64_t start,
 +                                 uint64_t length,
                                   bool use_scratch)
  {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
 +      unsigned first_entry = start >> PAGE_SHIFT;
 +      unsigned num_entries = length >> PAGE_SHIFT;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
  
  static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
 -                                    unsigned first_entry,
 +                                    uint64_t start,
                                      enum i915_cache_level cache_level)
  {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr;
 +      unsigned first_entry = start >> PAGE_SHIFT;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        struct sg_page_iter sg_iter;
                kunmap_atomic(pt_vaddr);
  }
  
 -static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 +static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
  {
 -      struct i915_hw_ppgtt *ppgtt =
 -              container_of(vm, struct i915_hw_ppgtt, base);
        int i;
  
 -      drm_mm_takedown(&ppgtt->base.mm);
 -
        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(ppgtt->base.dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
 +}
 +
 +static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 +{
 +      int i;
  
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
 -      kfree(ppgtt);
  }
  
 -static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 +static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
  {
 +      struct i915_hw_ppgtt *ppgtt =
 +              container_of(vm, struct i915_hw_ppgtt, base);
 +
 +      list_del(&vm->global_link);
 +      drm_mm_takedown(&ppgtt->base.mm);
 +      drm_mm_remove_node(&ppgtt->node);
 +
 +      gen6_ppgtt_unmap_pages(ppgtt);
 +      gen6_ppgtt_free(ppgtt);
 +}
 +
 +static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 +{
 +#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
 +#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      unsigned first_pd_entry_in_global_pt;
 -      int i;
 -      int ret = -ENOMEM;
 +      bool retried = false;
 +      int ret;
  
 -      /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
 -       * entries. For aliasing ppgtt support we just steal them at the end for
 -       * now. */
 -      first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
 +      /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
 +       * allocator works in address space sizes, so it's multiplied by page
 +       * size. We allocate at the top of the GTT to avoid fragmentation.
 +       */
 +      BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
 +alloc:
 +      ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
 +                                                &ppgtt->node, GEN6_PD_SIZE,
 +                                                GEN6_PD_ALIGN, 0,
 +                                                0, dev_priv->gtt.base.total,
 +                                                DRM_MM_SEARCH_DEFAULT);
 +      if (ret == -ENOSPC && !retried) {
 +              ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
 +                                             GEN6_PD_SIZE, GEN6_PD_ALIGN,
 +                                             I915_CACHE_NONE, 0);
 +              if (ret)
 +                      return ret;
 +
 +              retried = true;
 +              goto alloc;
 +      }
 +
 +      if (ppgtt->node.start < dev_priv->gtt.mappable_end)
 +              DRM_DEBUG("Forced to use aperture for PDEs\n");
  
 -      ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
        ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
 -      ppgtt->enable = gen6_ppgtt_enable;
 -      ppgtt->base.clear_range = gen6_ppgtt_clear_range;
 -      ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 -      ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 -      ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 -      ppgtt->base.start = 0;
 -      ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
 +      return ret;
 +}
 +
 +static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 +{
 +      int i;
 +
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
 +
        if (!ppgtt->pt_pages)
                return -ENOMEM;
  
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
 -              if (!ppgtt->pt_pages[i])
 -                      goto err_pt_alloc;
 +              if (!ppgtt->pt_pages[i]) {
 +                      gen6_ppgtt_free(ppgtt);
 +                      return -ENOMEM;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
 +{
 +      int ret;
 +
 +      ret = gen6_ppgtt_allocate_page_directories(ppgtt);
 +      if (ret)
 +              return ret;
 +
 +      ret = gen6_ppgtt_allocate_page_tables(ppgtt);
 +      if (ret) {
 +              drm_mm_remove_node(&ppgtt->node);
 +              return ret;
        }
  
        ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
                                     GFP_KERNEL);
 -      if (!ppgtt->pt_dma_addr)
 -              goto err_pt_alloc;
 +      if (!ppgtt->pt_dma_addr) {
 +              drm_mm_remove_node(&ppgtt->node);
 +              gen6_ppgtt_free(ppgtt);
 +              return -ENOMEM;
 +      }
 +
 +      return 0;
 +}
 +
 +static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      int i;
  
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                dma_addr_t pt_addr;
                                       PCI_DMA_BIDIRECTIONAL);
  
                if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
 -                      ret = -EIO;
 -                      goto err_pd_pin;
 -
 +                      gen6_ppgtt_unmap_pages(ppgtt);
 +                      return -EIO;
                }
 +
                ppgtt->pt_dma_addr[i] = pt_addr;
        }
  
 -      ppgtt->base.clear_range(&ppgtt->base, 0,
 -                              ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
 +      return 0;
 +}
  
 -      ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 +static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 +{
 +      struct drm_device *dev = ppgtt->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int ret;
  
 -      return 0;
 +      ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
 +      if (IS_GEN6(dev)) {
 +              ppgtt->enable = gen6_ppgtt_enable;
 +              ppgtt->switch_mm = gen6_mm_switch;
 +      } else if (IS_HASWELL(dev)) {
 +              ppgtt->enable = gen7_ppgtt_enable;
 +              ppgtt->switch_mm = hsw_mm_switch;
 +      } else if (IS_GEN7(dev)) {
 +              ppgtt->enable = gen7_ppgtt_enable;
 +              ppgtt->switch_mm = gen7_mm_switch;
 +      } else
 +              BUG();
  
 -err_pd_pin:
 -      if (ppgtt->pt_dma_addr) {
 -              for (i--; i >= 0; i--)
 -                      pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
 -                                     4096, PCI_DMA_BIDIRECTIONAL);
 -      }
 -err_pt_alloc:
 -      kfree(ppgtt->pt_dma_addr);
 -      for (i = 0; i < ppgtt->num_pd_entries; i++) {
 -              if (ppgtt->pt_pages[i])
 -                      __free_page(ppgtt->pt_pages[i]);
 +      ret = gen6_ppgtt_alloc(ppgtt);
 +      if (ret)
 +              return ret;
 +
 +      ret = gen6_ppgtt_setup_page_tables(ppgtt);
 +      if (ret) {
 +              gen6_ppgtt_free(ppgtt);
 +              return ret;
        }
 -      kfree(ppgtt->pt_pages);
  
 -      return ret;
 +      ppgtt->base.clear_range = gen6_ppgtt_clear_range;
 +      ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 +      ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 +      ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 +      ppgtt->base.start = 0;
 +      ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
 +      ppgtt->debug_dump = gen6_dump_ppgtt;
 +
 +      ppgtt->pd_offset =
 +              ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
 +
 +      ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 +
 +      DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
 +                       ppgtt->node.size >> 20,
 +                       ppgtt->node.start / PAGE_SIZE);
 +
 +      return 0;
  }
  
 -static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 +int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct i915_hw_ppgtt *ppgtt;
 -      int ret;
 -
 -      ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
 -      if (!ppgtt)
 -              return -ENOMEM;
 +      int ret = 0;
  
        ppgtt->base.dev = dev;
  
        else
                BUG();
  
 -      if (ret)
 -              kfree(ppgtt);
 -      else {
 -              dev_priv->mm.aliasing_ppgtt = ppgtt;
 +      if (!ret) {
 +              struct drm_i915_private *dev_priv = dev->dev_private;
 +              kref_init(&ppgtt->ref);
                drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
                            ppgtt->base.total);
 +              i915_init_vm(dev_priv, &ppgtt->base);
 +              if (INTEL_INFO(dev)->gen < 8) {
 +                      gen6_write_pdes(ppgtt);
 +                      DRM_DEBUG("Adding PPGTT at offset %x\n",
 +                                ppgtt->pd_offset << 10);
 +              }
        }
  
        return ret;
  }
  
 -void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 +static void
 +ppgtt_bind_vma(struct i915_vma *vma,
 +             enum i915_cache_level cache_level,
 +             u32 flags)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 -
 -      if (!ppgtt)
 -              return;
 +      WARN_ON(flags);
  
 -      ppgtt->base.cleanup(&ppgtt->base);
 -      dev_priv->mm.aliasing_ppgtt = NULL;
 +      vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
 +                              cache_level);
  }
  
 -void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 -                          struct drm_i915_gem_object *obj,
 -                          enum i915_cache_level cache_level)
 +static void ppgtt_unbind_vma(struct i915_vma *vma)
  {
 -      ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
 -                                 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
 -                                 cache_level);
 -}
 -
 -void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 -                            struct drm_i915_gem_object *obj)
 -{
 -      ppgtt->base.clear_range(&ppgtt->base,
 -                              i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
 -                              obj->base.size >> PAGE_SHIFT,
 -                              true);
 +      vma->vm->clear_range(vma->vm,
 +                           vma->node.start,
 +                           vma->obj->base.size,
 +                           true);
  }
  
  extern int intel_iommu_gfx_mapped;
@@@ -1337,8 -840,8 +1337,8 @@@ void i915_gem_suspend_gtt_mappings(stru
        i915_check_and_clear_faults(dev);
  
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 -                                     dev_priv->gtt.base.start / PAGE_SIZE,
 -                                     dev_priv->gtt.base.total / PAGE_SIZE,
 +                                     dev_priv->gtt.base.start,
 +                                     dev_priv->gtt.base.total,
                                       false);
  }
  
@@@ -1346,44 -849,18 +1346,44 @@@ void i915_gem_restore_gtt_mappings(stru
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 +      struct i915_address_space *vm;
  
        i915_check_and_clear_faults(dev);
  
        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 -                                     dev_priv->gtt.base.start / PAGE_SIZE,
 -                                     dev_priv->gtt.base.total / PAGE_SIZE,
 +                                     dev_priv->gtt.base.start,
 +                                     dev_priv->gtt.base.total,
                                       true);
  
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 +              struct i915_vma *vma = i915_gem_obj_to_vma(obj,
 +                                                         &dev_priv->gtt.base);
 +              if (!vma)
 +                      continue;
 +
                i915_gem_clflush_object(obj, obj->pin_display);
 -              i915_gem_gtt_bind_object(obj, obj->cache_level);
 +              /* The bind_vma code tries to be smart about tracking mappings.
 +               * Unfortunately, above we've just wiped out the mappings
 +               * without telling our object about it. So we need to fake it.
 +               */
 +              obj->has_global_gtt_mapping = 0;
 +              vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 +      }
 +
 +      if (INTEL_INFO(dev)->gen >= 8)
 +              return;
 +
 +      list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 +              /* TODO: Perhaps it shouldn't be gen6 specific */
 +              if (i915_is_ggtt(vm)) {
 +                      if (dev_priv->mm.aliasing_ppgtt)
 +                              gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
 +                      continue;
 +              }
 +
 +              gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
        }
  
        i915_gem_chipset_flush(dev);
@@@ -1414,11 -891,10 +1414,11 @@@ static inline void gen8_set_pte(void __
  
  static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
 -                                   unsigned int first_entry,
 +                                   uint64_t start,
                                     enum i915_cache_level level)
  {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
 +      unsigned first_entry = start >> PAGE_SHIFT;
        gen8_gtt_pte_t __iomem *gtt_entries =
                (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
   */
  static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
 -                                   unsigned int first_entry,
 +                                   uint64_t start,
                                     enum i915_cache_level level)
  {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
 +      unsigned first_entry = start >> PAGE_SHIFT;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
  }
  
  static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 -                                unsigned int first_entry,
 -                                unsigned int num_entries,
 +                                uint64_t start,
 +                                uint64_t length,
                                  bool use_scratch)
  {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
 +      unsigned first_entry = start >> PAGE_SHIFT;
 +      unsigned num_entries = length >> PAGE_SHIFT;
        gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
  }
  
  static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 -                                unsigned int first_entry,
 -                                unsigned int num_entries,
 +                                uint64_t start,
 +                                uint64_t length,
                                  bool use_scratch)
  {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
 +      unsigned first_entry = start >> PAGE_SHIFT;
 +      unsigned num_entries = length >> PAGE_SHIFT;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
        readl(gtt_base);
  }
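
With the address-space hooks now taking byte offsets, the page-entry maths
lives inside each implementation. A worked example, assuming 4 KiB pages
(PAGE_SHIFT == 12): clear_range(vm, 0x10000, 0x4000, true) resolves to
first_entry = 0x10000 >> 12 = 16 and num_entries = 0x4000 >> 12 = 4, the
same PTE span the old page-indexed interface addressed as (16, 4).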
  
 -static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 -                                   struct sg_table *st,
 -                                   unsigned int pg_start,
 -                                   enum i915_cache_level cache_level)
 +
 +static void i915_ggtt_bind_vma(struct i915_vma *vma,
 +                             enum i915_cache_level cache_level,
 +                             u32 unused)
  {
 +      const unsigned long entry = vma->node.start >> PAGE_SHIFT;
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
  
 -      intel_gtt_insert_sg_entries(st, pg_start, flags);
 -
 +      BUG_ON(!i915_is_ggtt(vma->vm));
 +      intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
 +      vma->obj->has_global_gtt_mapping = 1;
  }
  
  static void i915_ggtt_clear_range(struct i915_address_space *vm,
 -                                unsigned int first_entry,
 -                                unsigned int num_entries,
 +                                uint64_t start,
 +                                uint64_t length,
                                  bool unused)
  {
 +      unsigned first_entry = start >> PAGE_SHIFT;
 +      unsigned num_entries = length >> PAGE_SHIFT;
        intel_gtt_clear_range(first_entry, num_entries);
  }
  
 +static void i915_ggtt_unbind_vma(struct i915_vma *vma)
 +{
 +      const unsigned int first = vma->node.start >> PAGE_SHIFT;
 +      const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 +
 +      BUG_ON(!i915_is_ggtt(vma->vm));
 +      vma->obj->has_global_gtt_mapping = 0;
 +      intel_gtt_clear_range(first, size);
 +}
  
 -void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 -                            enum i915_cache_level cache_level)
 +static void ggtt_bind_vma(struct i915_vma *vma,
 +                        enum i915_cache_level cache_level,
 +                        u32 flags)
  {
 -      struct drm_device *dev = obj->base.dev;
 +      struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 +      struct drm_i915_gem_object *obj = vma->obj;
  
 -      dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
 -                                        entry,
 -                                        cache_level);
 +      /* If there is no aliasing PPGTT, or the caller needs a global mapping,
 +       * or we have a global mapping already but the cacheability flags have
 +       * changed, set the global PTEs.
 +       *
 +       * If there is an aliasing PPGTT it is anecdotally faster, so use that
 +       * instead if none of the above hold true.
 +       *
 +       * NB: A global mapping should only be needed for special regions like
 +       * "gtt mappable", SNB errata, or if specified via special execbuf
 +       * flags. At all other times, the GPU will use the aliasing PPGTT.
 +       */
 +      if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
 +              if (!obj->has_global_gtt_mapping ||
 +                  (cache_level != obj->cache_level)) {
 +                      vma->vm->insert_entries(vma->vm, obj->pages,
 +                                              vma->node.start,
 +                                              cache_level);
 +                      obj->has_global_gtt_mapping = 1;
 +              }
 +      }
  
 -      obj->has_global_gtt_mapping = 1;
 +      if (dev_priv->mm.aliasing_ppgtt &&
 +          (!obj->has_aliasing_ppgtt_mapping ||
 +           (cache_level != obj->cache_level))) {
 +              struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 +              appgtt->base.insert_entries(&appgtt->base,
 +                                          vma->obj->pages,
 +                                          vma->node.start,
 +                                          cache_level);
 +              vma->obj->has_aliasing_ppgtt_mapping = 1;
 +      }
  }
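
A minimal sketch of the policy described in the comment above, reduced to a
standalone predicate; needs_global_bind() is a hypothetical helper, not part
of this patch:

	/* Sketch only: mirrors the global-PTE branch of ggtt_bind_vma(). */
	static bool needs_global_bind(struct drm_i915_private *dev_priv,
				      struct drm_i915_gem_object *obj,
				      enum i915_cache_level cache_level,
				      u32 flags)
	{
		if (dev_priv->mm.aliasing_ppgtt && !(flags & GLOBAL_BIND))
			return false; /* the aliasing PPGTT will be used */
		return !obj->has_global_gtt_mapping ||
		       cache_level != obj->cache_level;
	}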
  
 -void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 +static void ggtt_unbind_vma(struct i915_vma *vma)
  {
 -      struct drm_device *dev = obj->base.dev;
 +      struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 -
 -      dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
 -                                     entry,
 -                                     obj->base.size >> PAGE_SHIFT,
 -                                     true);
 +      struct drm_i915_gem_object *obj = vma->obj;
 +
 +      if (obj->has_global_gtt_mapping) {
 +              vma->vm->clear_range(vma->vm,
 +                                   vma->node.start,
 +                                   obj->base.size,
 +                                   true);
 +              obj->has_global_gtt_mapping = 0;
 +      }
  
 -      obj->has_global_gtt_mapping = 0;
 +      if (obj->has_aliasing_ppgtt_mapping) {
 +              struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 +              appgtt->base.clear_range(&appgtt->base,
 +                                       vma->node.start,
 +                                       obj->base.size,
 +                                       true);
 +              obj->has_aliasing_ppgtt_mapping = 0;
 +      }
  }
  
  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@@ -1724,14 -1145,29 +1724,14 @@@ void i915_gem_setup_global_gtt(struct d
  
        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
 -              const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
 -              ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
 +              ggtt_vm->clear_range(ggtt_vm, hole_start,
 +                                   hole_end - hole_start, true);
        }
  
        /* And finally clear the reserved guard page */
 -      ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
 -}
 -
 -static bool
 -intel_enable_ppgtt(struct drm_device *dev)
 -{
 -      if (i915_enable_ppgtt >= 0)
 -              return i915_enable_ppgtt;
 -
 -#ifdef CONFIG_INTEL_IOMMU
 -      /* Disable ppgtt on SNB if VT-d is on. */
 -      if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
 -              return false;
 -#endif
 -
 -      return true;
 +      ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
  }
  
  void i915_gem_init_global_gtt(struct drm_device *dev)
        gtt_size = dev_priv->gtt.base.total;
        mappable_size = dev_priv->gtt.mappable_end;
  
 -      if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
 -              int ret;
 -
 -              if (INTEL_INFO(dev)->gen <= 7) {
 -                      /* PPGTT pdes are stolen from global gtt ptes, so shrink the
 -                       * aperture accordingly when using aliasing ppgtt. */
 -                      gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
 -              }
 -
 -              i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 -
 -              ret = i915_gem_init_aliasing_ppgtt(dev);
 -              if (!ret)
 -                      return;
 -
 -              DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
 -              drm_mm_takedown(&dev_priv->gtt.base.mm);
 -              if (INTEL_INFO(dev)->gen < 8)
 -                      gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
 -      }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
  }
  
@@@ -1796,6 -1252,11 +1796,6 @@@ static inline unsigned int gen8_get_tot
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
 -      if (bdw_gmch_ctl > 4) {
 -              WARN_ON(!i915_preliminary_hw_support);
 -              return 4<<20;
 -      }
 -
        return bdw_gmch_ctl << 20;
  }
  
@@@ -1817,14 -1278,14 +1817,14 @@@ static int ggtt_probe_common(struct drm
                             size_t gtt_size)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       phys_addr_t gtt_bus_addr;
+       phys_addr_t gtt_phys_addr;
        int ret;
  
        /* For modern GENs the PTEs and register space are split in the BAR */
-       gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+       gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
                (pci_resource_len(dev->pdev, 0) / 2);
  
-       dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+       dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
        if (!dev_priv->gtt.gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                return -ENOMEM;
@@@ -1977,6 -1438,7 +1977,6 @@@ static int i915_gmch_probe(struct drm_d
  
        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
        dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
 -      dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
  
        if (unlikely(dev_priv->gtt.do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@@ -2031,62 -1493,3 +2031,62 @@@ int i915_gem_gtt_init(struct drm_devic
  
        return 0;
  }
 +
 +static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 +                                            struct i915_address_space *vm)
 +{
 +      struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 +      if (vma == NULL)
 +              return ERR_PTR(-ENOMEM);
 +
 +      INIT_LIST_HEAD(&vma->vma_link);
 +      INIT_LIST_HEAD(&vma->mm_list);
 +      INIT_LIST_HEAD(&vma->exec_list);
 +      vma->vm = vm;
 +      vma->obj = obj;
 +
 +      switch (INTEL_INFO(vm->dev)->gen) {
 +      case 8:
 +      case 7:
 +      case 6:
 +              if (i915_is_ggtt(vm)) {
 +                      vma->unbind_vma = ggtt_unbind_vma;
 +                      vma->bind_vma = ggtt_bind_vma;
 +              } else {
 +                      vma->unbind_vma = ppgtt_unbind_vma;
 +                      vma->bind_vma = ppgtt_bind_vma;
 +              }
 +              break;
 +      case 5:
 +      case 4:
 +      case 3:
 +      case 2:
 +              BUG_ON(!i915_is_ggtt(vm));
 +              vma->unbind_vma = i915_ggtt_unbind_vma;
 +              vma->bind_vma = i915_ggtt_bind_vma;
 +              break;
 +      default:
 +              BUG();
 +      }
 +
 +      /* Keep GGTT vmas first to make debug easier */
 +      if (i915_is_ggtt(vm))
 +              list_add(&vma->vma_link, &obj->vma_list);
 +      else
 +              list_add_tail(&vma->vma_link, &obj->vma_list);
 +
 +      return vma;
 +}
 +
 +struct i915_vma *
 +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 +                                struct i915_address_space *vm)
 +{
 +      struct i915_vma *vma;
 +
 +      vma = i915_gem_obj_to_vma(obj, vm);
 +      if (!vma)
 +              vma = __i915_gem_vma_create(obj, vm);
 +
 +      return vma;
 +}
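
A hedged usage sketch (not from this patch): a caller that wants an object
mapped into a particular address space goes through the lookup-or-create
helper and then the vma's bind hook. The sketch assumes GTT space for
vma->node has already been allocated, which the real pin path takes care of:

	static int example_bind_obj(struct drm_i915_gem_object *obj,
				    struct i915_address_space *vm,
				    u32 flags)
	{
		struct i915_vma *vma = i915_gem_obj_lookup_or_create_vma(obj, vm);

		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/* GLOBAL_BIND only for the special cases noted in ggtt_bind_vma() */
		vma->bind_vma(vma, obj->cache_level, flags);
		return 0;
	}
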
@@@ -146,7 -146,10 +146,10 @@@ static void i915_error_vprintf(struct d
                va_list tmp;
  
                va_copy(tmp, args);
-               if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+               len = vsnprintf(NULL, 0, f, tmp);
+               va_end(tmp);
+               if (!__i915_error_seek(e, len))
                        return;
        }
  
@@@ -235,61 -238,50 +238,61 @@@ static const char *hangcheck_action_to_
  
  static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                                  struct drm_device *dev,
 -                                struct drm_i915_error_state *error,
 -                                unsigned ring)
 +                                struct drm_i915_error_ring *ring)
  {
 -      BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
 -      if (!error->ring[ring].valid)
 +      if (!ring->valid)
                return;
  
 -      err_printf(m, "%s command stream:\n", ring_str(ring));
 -      err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
 -      err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
 -      err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
 -      err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
 -      err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
 -      err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
 -      err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
 +      err_printf(m, "  HEAD: 0x%08x\n", ring->head);
 +      err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
 +      err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
 +      err_printf(m, "  HWS: 0x%08x\n", ring->hws);
 +      err_printf(m, "  ACTHD: 0x%08x\n", ring->acthd);
 +      err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
 +      err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
 +      err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
        if (INTEL_INFO(dev)->gen >= 4) {
 -              err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr[ring]);
 -              err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
 -              err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 +              err_printf(m, "  BBADDR: 0x%08llx\n", ring->bbaddr);
 +              err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
 +              err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
        }
 -      err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
 -      err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
 +      err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
 +      err_printf(m, "  FADDR: 0x%08x\n", ring->faddr);
        if (INTEL_INFO(dev)->gen >= 6) {
 -              err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
 -              err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
 +              err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
 +              err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
                err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
 -                         error->semaphore_mboxes[ring][0],
 -                         error->semaphore_seqno[ring][0]);
 +                         ring->semaphore_mboxes[0],
 +                         ring->semaphore_seqno[0]);
                err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
 -                         error->semaphore_mboxes[ring][1],
 -                         error->semaphore_seqno[ring][1]);
 +                         ring->semaphore_mboxes[1],
 +                         ring->semaphore_seqno[1]);
                if (HAS_VEBOX(dev)) {
                        err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
 -                                 error->semaphore_mboxes[ring][2],
 -                                 error->semaphore_seqno[ring][2]);
 +                                 ring->semaphore_mboxes[2],
 +                                 ring->semaphore_seqno[2]);
                }
        }
 -      err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
 -      err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
 -      err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 -      err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
 +      if (USES_PPGTT(dev)) {
 +              err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
 +
 +              if (INTEL_INFO(dev)->gen >= 8) {
 +                      int i;
 +                      for (i = 0; i < 4; i++)
 +                              err_printf(m, "  PDP%d: 0x%016llx\n",
 +                                         i, ring->vm_info.pdp[i]);
 +              } else {
 +                      err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
 +                                 ring->vm_info.pp_dir_base);
 +              }
 +      }
 +      err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
 +      err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
 +      err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
 +      err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
        err_printf(m, "  hangcheck: %s [%d]\n",
 -                 hangcheck_action_to_str(error->hangcheck_action[ring]),
 -                 error->hangcheck_score[ring]);
 +                 hangcheck_action_to_str(ring->hangcheck_action),
 +                 ring->hangcheck_score);
  }
  
  void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
        va_end(args);
  }
  
 +static void print_error_obj(struct drm_i915_error_state_buf *m,
 +                          struct drm_i915_error_object *obj)
 +{
 +      int page, offset, elt;
 +
 +      for (page = offset = 0; page < obj->page_count; page++) {
 +              for (elt = 0; elt < PAGE_SIZE/4; elt++) {
 +                      err_printf(m, "%08x :  %08x\n", offset,
 +                                 obj->pages[page][elt]);
 +                      offset += 4;
 +              }
 +      }
 +}
 +
  int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            const struct i915_error_state_file_priv *error_priv)
  {
        struct drm_device *dev = error_priv->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error = error_priv->error;
 -      int i, j, page, offset, elt;
 +      int i, j, offset, elt;
 +      int max_hangcheck_score;
  
        if (!error) {
                err_printf(m, "no error state collected\n");
                goto out;
        }
  
 +      err_printf(m, "%s\n", error->error_msg);
        err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
 +      max_hangcheck_score = 0;
 +      for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 +              if (error->ring[i].hangcheck_score > max_hangcheck_score)
 +                      max_hangcheck_score = error->ring[i].hangcheck_score;
 +      }
 +      for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 +              if (error->ring[i].hangcheck_score == max_hangcheck_score &&
 +                  error->ring[i].pid != -1) {
 +                      err_printf(m, "Active process (on ring %s): %s [%d]\n",
 +                                 ring_str(i),
 +                                 error->ring[i].comm,
 +                                 error->ring[i].pid);
 +              }
 +      }
 +      err_printf(m, "Reset count: %u\n", error->reset_count);
 +      err_printf(m, "Suspend count: %u\n", error->suspend_count);
        err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
        err_printf(m, "EIR: 0x%08x\n", error->eir);
        err_printf(m, "IER: 0x%08x\n", error->ier);
        if (INTEL_INFO(dev)->gen == 7)
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
  
 -      for (i = 0; i < ARRAY_SIZE(error->ring); i++)
 -              i915_ring_error_state(m, dev, error, i);
 +      for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 +              err_printf(m, "%s command stream:\n", ring_str(i));
 +              i915_ring_error_state(m, dev, &error->ring[i]);
 +      }
  
        if (error->active_bo)
                print_error_buffers(m, "Active",
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                struct drm_i915_error_object *obj;
  
 -              if ((obj = error->ring[i].batchbuffer)) {
 -                      err_printf(m, "%s --- gtt_offset = 0x%08x\n",
 -                                 dev_priv->ring[i].name,
 +              obj = error->ring[i].batchbuffer;
 +              if (obj) {
 +                      err_puts(m, dev_priv->ring[i].name);
 +                      if (error->ring[i].pid != -1)
 +                              err_printf(m, " (submitted by %s [%d])",
 +                                         error->ring[i].comm,
 +                                         error->ring[i].pid);
 +                      err_printf(m, " --- gtt_offset = 0x%08x\n",
                                   obj->gtt_offset);
 -                      offset = 0;
 -                      for (page = 0; page < obj->page_count; page++) {
 -                              for (elt = 0; elt < PAGE_SIZE/4; elt++) {
 -                                      err_printf(m, "%08x :  %08x\n", offset,
 -                                                 obj->pages[page][elt]);
 -                                      offset += 4;
 -                              }
 -                      }
 +                      print_error_obj(m, obj);
 +              }
 +
 +              obj = error->ring[i].wa_batchbuffer;
 +              if (obj) {
 +                      err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
 +                                 dev_priv->ring[i].name, obj->gtt_offset);
 +                      print_error_obj(m, obj);
                }
  
                if (error->ring[i].num_requests) {
                        err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->ring[i].name,
                                   obj->gtt_offset);
 +                      print_error_obj(m, obj);
 +              }
 +
 +              if ((obj = error->ring[i].hws_page)) {
 +                      err_printf(m, "%s --- HW Status = 0x%08x\n",
 +                                 dev_priv->ring[i].name,
 +                                 obj->gtt_offset);
                        offset = 0;
 -                      for (page = 0; page < obj->page_count; page++) {
 -                              for (elt = 0; elt < PAGE_SIZE/4; elt++) {
 -                                      err_printf(m, "%08x :  %08x\n",
 -                                                 offset,
 -                                                 obj->pages[page][elt]);
 -                                      offset += 4;
 -                              }
 +                      for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
 +                              err_printf(m, "[%04x] %08x %08x %08x %08x\n",
 +                                         offset,
 +                                         obj->pages[0][elt],
 +                                         obj->pages[0][elt+1],
 +                                         obj->pages[0][elt+2],
 +                                         obj->pages[0][elt+3]);
 +                              offset += 16;
                        }
                }
  
@@@ -527,7 -472,6 +530,7 @@@ static void i915_error_state_free(struc
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                i915_error_object_free(error->ring[i].batchbuffer);
                i915_error_object_free(error->ring[i].ringbuffer);
 +              i915_error_object_free(error->ring[i].hws_page);
                i915_error_object_free(error->ring[i].ctx);
                kfree(error->ring[i].requests);
        }
  static struct drm_i915_error_object *
  i915_error_object_create_sized(struct drm_i915_private *dev_priv,
                               struct drm_i915_gem_object *src,
 +                             struct i915_address_space *vm,
                               const int num_pages)
  {
        struct drm_i915_error_object *dst;
        if (dst == NULL)
                return NULL;
  
 -      reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
 +      reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
        for (i = 0; i < num_pages; i++) {
                unsigned long flags;
                void *d;
                        goto unwind;
  
                local_irq_save(flags);
 -              if (reloc_offset < dev_priv->gtt.mappable_end &&
 -                  src->has_global_gtt_mapping) {
 +              if (src->cache_level == I915_CACHE_NONE &&
 +                  reloc_offset < dev_priv->gtt.mappable_end &&
 +                  src->has_global_gtt_mapping &&
 +                  i915_is_ggtt(vm)) {
                        void __iomem *s;
  
                        /* Simply ignore tiling or any overlapping fence.
@@@ -618,12 -559,8 +621,12 @@@ unwind
        kfree(dst);
        return NULL;
  }
 -#define i915_error_object_create(dev_priv, src) \
 -      i915_error_object_create_sized((dev_priv), (src), \
 +#define i915_error_object_create(dev_priv, src, vm) \
 +      i915_error_object_create_sized((dev_priv), (src), (vm), \
 +                                     (src)->base.size>>PAGE_SHIFT)
 +
 +#define i915_error_ggtt_object_create(dev_priv, src) \
 +      i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
                                       (src)->base.size>>PAGE_SHIFT)
  
  static void capture_bo(struct drm_i915_error_buffer *err,
        err->write_domain = obj->base.write_domain;
        err->fence_reg = obj->fence_reg;
        err->pinned = 0;
 -      if (obj->pin_count > 0)
 +      if (i915_gem_obj_is_pinned(obj))
                err->pinned = 1;
        if (obj->user_pin_count > 0)
                err->pinned = -1;
@@@ -671,7 -608,7 +674,7 @@@ static u32 capture_pinned_bo(struct drm
        int i = 0;
  
        list_for_each_entry(obj, head, global_list) {
 -              if (obj->pin_count == 0)
 +              if (!i915_gem_obj_is_pinned(obj))
                        continue;
  
                capture_bo(err++, obj);
        return i;
  }
  
 +/* Generate a semi-unique error code. The code is not meant to have meaning;
 + * its only purpose is to try to prevent false duplicate bug reports by
 + * grossly estimating a GPU error state.
 + *
 + * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 + * the hang if we could strip the GTT offset information from it.
 + *
 + * It's only a small step better than a random number in its current form.
 + */
 +static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 +                                       struct drm_i915_error_state *error,
 +                                       int *ring_id)
 +{
 +      uint32_t error_code = 0;
 +      int i;
 +
 +      /* IPEHR would be an ideal way to detect errors, as it's the gross
 +       * measure of "the command that hung." However, it has some very common
 +       * synchronization commands which almost always appear in cases that
 +       * are strictly a client bug. Use instdone to help differentiate.
 +       */
 +      for (i = 0; i < I915_NUM_RINGS; i++) {
 +              if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
 +                      if (ring_id)
 +                              *ring_id = i;
 +
 +                      return error->ring[i].ipehr ^ error->ring[i].instdone;
 +              }
 +      }
 +
 +      return error_code;
 +}
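
Illustrative arithmetic only: if the hung ring reported ipehr = 0x0b160001
and instdone = 0xffffffff, the generated code would be
0x0b160001 ^ 0xffffffff == 0xf4e9fffe, and that value becomes the "ecode"
field of the hang message assembled below.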
 +
  static void i915_gem_record_fences(struct drm_device *dev,
                                   struct drm_i915_error_state *error)
  {
        }
  }
  
 -static struct drm_i915_error_object *
 -i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 -                           struct intel_ring_buffer *ring)
 -{
 -      struct i915_address_space *vm;
 -      struct i915_vma *vma;
 -      struct drm_i915_gem_object *obj;
 -      u32 seqno;
 -
 -      if (!ring->get_seqno)
 -              return NULL;
 -
 -      if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
 -              u32 acthd = I915_READ(ACTHD);
 -
 -              if (WARN_ON(ring->id != RCS))
 -                      return NULL;
 -
 -              obj = ring->scratch.obj;
 -              if (obj != NULL &&
 -                  acthd >= i915_gem_obj_ggtt_offset(obj) &&
 -                  acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 -                      return i915_error_object_create(dev_priv, obj);
 -      }
 -
 -      seqno = ring->get_seqno(ring, false);
 -      list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 -              list_for_each_entry(vma, &vm->active_list, mm_list) {
 -                      obj = vma->obj;
 -                      if (obj->ring != ring)
 -                              continue;
 -
 -                      if (i915_seqno_passed(seqno, obj->last_read_seqno))
 -                              continue;
 -
 -                      if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
 -                              continue;
 -
 -                      /* We need to copy these to an anonymous buffer as the simplest
 -                       * method to avoid being overwritten by userspace.
 -                       */
 -                      return i915_error_object_create(dev_priv, obj);
 -              }
 -      }
 -
 -      return NULL;
 -}
 -
  static void i915_record_ring_state(struct drm_device *dev,
 -                                 struct drm_i915_error_state *error,
 -                                 struct intel_ring_buffer *ring)
 +                                 struct intel_ring_buffer *ring,
 +                                 struct drm_i915_error_ring *ering)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (INTEL_INFO(dev)->gen >= 6) {
 -              error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
 -              error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
 -              error->semaphore_mboxes[ring->id][0]
 +              ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
 +              ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
 +              ering->semaphore_mboxes[0]
                        = I915_READ(RING_SYNC_0(ring->mmio_base));
 -              error->semaphore_mboxes[ring->id][1]
 +              ering->semaphore_mboxes[1]
                        = I915_READ(RING_SYNC_1(ring->mmio_base));
 -              error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
 -              error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
 +              ering->semaphore_seqno[0] = ring->sync_seqno[0];
 +              ering->semaphore_seqno[1] = ring->sync_seqno[1];
        }
  
        if (HAS_VEBOX(dev)) {
 -              error->semaphore_mboxes[ring->id][2] =
 +              ering->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(ring->mmio_base));
 -              error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
 +              ering->semaphore_seqno[2] = ring->sync_seqno[2];
        }
  
        if (INTEL_INFO(dev)->gen >= 4) {
 -              error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
 -              error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
 -              error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
 -              error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
 -              error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
 -              error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
 +              ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
 +              ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
 +              ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
 +              ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
 +              ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
 +              ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
                if (INTEL_INFO(dev)->gen >= 8)
 -                      error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
 -              error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
 +                      ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
 +              ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
        } else {
 -              error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
 -              error->ipeir[ring->id] = I915_READ(IPEIR);
 -              error->ipehr[ring->id] = I915_READ(IPEHR);
 -              error->instdone[ring->id] = I915_READ(INSTDONE);
 +              ering->faddr = I915_READ(DMA_FADD_I8XX);
 +              ering->ipeir = I915_READ(IPEIR);
 +              ering->ipehr = I915_READ(IPEHR);
 +              ering->instdone = I915_READ(INSTDONE);
 +      }
 +
 +      ering->waiting = waitqueue_active(&ring->irq_queue);
 +      ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
 +      ering->seqno = ring->get_seqno(ring, false);
 +      ering->acthd = intel_ring_get_active_head(ring);
 +      ering->head = I915_READ_HEAD(ring);
 +      ering->tail = I915_READ_TAIL(ring);
 +      ering->ctl = I915_READ_CTL(ring);
 +
 +      if (I915_NEED_GFX_HWS(dev)) {
 +              int mmio;
 +
 +              if (IS_GEN7(dev)) {
 +                      switch (ring->id) {
 +                      default:
 +                      case RCS:
 +                              mmio = RENDER_HWS_PGA_GEN7;
 +                              break;
 +                      case BCS:
 +                              mmio = BLT_HWS_PGA_GEN7;
 +                              break;
 +                      case VCS:
 +                              mmio = BSD_HWS_PGA_GEN7;
 +                              break;
 +                      case VECS:
 +                              mmio = VEBOX_HWS_PGA_GEN7;
 +                              break;
 +                      }
 +              } else if (IS_GEN6(ring->dev)) {
 +                      mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
 +              } else {
 +                      /* XXX: gen8 returns to sanity */
 +                      mmio = RING_HWS_PGA(ring->mmio_base);
 +              }
 +
 +              ering->hws = I915_READ(mmio);
        }
  
 -      error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
 -      error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
 -      error->seqno[ring->id] = ring->get_seqno(ring, false);
 -      error->acthd[ring->id] = intel_ring_get_active_head(ring);
 -      error->head[ring->id] = I915_READ_HEAD(ring);
 -      error->tail[ring->id] = I915_READ_TAIL(ring);
 -      error->ctl[ring->id] = I915_READ_CTL(ring);
 +      ering->cpu_ring_head = ring->head;
 +      ering->cpu_ring_tail = ring->tail;
 +
 +      ering->hangcheck_score = ring->hangcheck.score;
 +      ering->hangcheck_action = ring->hangcheck.action;
 +
 +      if (USES_PPGTT(dev)) {
 +              int i;
  
 -      error->cpu_ring_head[ring->id] = ring->head;
 -      error->cpu_ring_tail[ring->id] = ring->tail;
 +              ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
  
 -      error->hangcheck_score[ring->id] = ring->hangcheck.score;
 -      error->hangcheck_action[ring->id] = ring->hangcheck.action;
 +              switch (INTEL_INFO(dev)->gen) {
 +              case 8:
 +                      for (i = 0; i < 4; i++) {
 +                              ering->vm_info.pdp[i] =
 +                                      I915_READ(GEN8_RING_PDP_UDW(ring, i));
 +                              ering->vm_info.pdp[i] <<= 32;
 +                              ering->vm_info.pdp[i] |=
 +                                      I915_READ(GEN8_RING_PDP_LDW(ring, i));
 +                      }
 +                      break;
 +              case 7:
 +                      ering->vm_info.pp_dir_base = RING_PP_DIR_BASE(ring);
 +                      break;
 +              case 6:
 +                      ering->vm_info.pp_dir_base = RING_PP_DIR_BASE_READ(ring);
 +                      break;
 +              }
 +      }
  }
  
  
@@@ -871,9 -770,7 +874,9 @@@ static void i915_gem_record_active_cont
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
                        ering->ctx = i915_error_object_create_sized(dev_priv,
 -                                                                  obj, 1);
 +                                                                  obj,
 +                                                                  &dev_priv->gtt.base,
 +                                                                  1);
                        break;
                }
        }
@@@ -894,48 -791,14 +897,48 @@@ static void i915_gem_record_rings(struc
  
                error->ring[i].valid = true;
  
 -              i915_record_ring_state(dev, error, ring);
 +              i915_record_ring_state(dev, ring, &error->ring[i]);
  
 -              error->ring[i].batchbuffer =
 -                      i915_error_first_batchbuffer(dev_priv, ring);
 +              error->ring[i].pid = -1;
 +              request = i915_gem_find_active_request(ring);
 +              if (request) {
 +                      /* We need to copy these to an anonymous buffer
 +                       * as the simplest method to avoid being overwritten
 +                       * by userspace.
 +                       */
 +                      error->ring[i].batchbuffer =
 +                              i915_error_object_create(dev_priv,
 +                                                       request->batch_obj,
 +                                                       request->ctx ?
 +                                                       request->ctx->vm :
 +                                                       &dev_priv->gtt.base);
 +
 +                      if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
 +                          ring->scratch.obj)
 +                              error->ring[i].wa_batchbuffer =
 +                                      i915_error_ggtt_object_create(dev_priv,
 +                                                           ring->scratch.obj);
 +
 +                      if (request->file_priv) {
 +                              struct task_struct *task;
 +
 +                              rcu_read_lock();
 +                              task = pid_task(request->file_priv->file->pid,
 +                                              PIDTYPE_PID);
 +                              if (task) {
 +                                      strcpy(error->ring[i].comm, task->comm);
 +                                      error->ring[i].pid = task->pid;
 +                              }
 +                              rcu_read_unlock();
 +                      }
 +              }
  
                error->ring[i].ringbuffer =
 -                      i915_error_object_create(dev_priv, ring->obj);
 +                      i915_error_ggtt_object_create(dev_priv, ring->obj);
  
 +              if (ring->status_page.obj)
 +                      error->ring[i].hws_page =
 +                              i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
  
                i915_gem_record_active_context(ring, error, &error->ring[i]);
  
@@@ -982,7 -845,7 +985,7 @@@ static void i915_gem_capture_vm(struct 
                i++;
        error->active_bo_count[ndx] = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
 -              if (obj->pin_count)
 +              if (i915_gem_obj_is_pinned(obj))
                        i++;
        error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
  
@@@ -1016,6 -879,11 +1019,6 @@@ static void i915_gem_capture_buffers(st
        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                cnt++;
  
 -      if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
 -              cnt = 1;
 -
 -      vm = &dev_priv->gtt.base;
 -
        error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
        error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
        error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
                i915_gem_capture_vm(dev_priv, error, vm, i++);
  }
  
 +/* Capture all registers which don't fit into another category. */
 +static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 +                                 struct drm_i915_error_state *error)
 +{
 +      struct drm_device *dev = dev_priv->dev;
 +      int pipe;
 +
 +      /* General organization
 +       * 1. Registers specific to a single generation
 +       * 2. Registers which belong to multiple generations
 +       * 3. Feature-specific registers
 +       * 4. Everything else
 +       * Please try to follow the order.
 +       */
 +
 +      /* 1: Registers specific to a single generation */
 +      if (IS_VALLEYVIEW(dev)) {
 +              error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
 +              error->forcewake = I915_READ(FORCEWAKE_VLV);
 +      }
 +
 +      if (IS_GEN7(dev))
 +              error->err_int = I915_READ(GEN7_ERR_INT);
 +
 +      if (IS_GEN6(dev)) {
 +              error->forcewake = I915_READ(FORCEWAKE);
 +              error->gab_ctl = I915_READ(GAB_CTL);
 +              error->gfx_mode = I915_READ(GFX_MODE);
 +      }
 +
 +      if (IS_GEN2(dev))
 +              error->ier = I915_READ16(IER);
 +
 +      /* 2: Registers which belong to multiple generations */
 +      if (INTEL_INFO(dev)->gen >= 7)
 +              error->forcewake = I915_READ(FORCEWAKE_MT);
 +
 +      if (INTEL_INFO(dev)->gen >= 6) {
 +              error->derrmr = I915_READ(DERRMR);
 +              error->error = I915_READ(ERROR_GEN6);
 +              error->done_reg = I915_READ(DONE_REG);
 +      }
 +
 +      /* 3: Feature specific registers */
 +      if (IS_GEN6(dev) || IS_GEN7(dev)) {
 +              error->gam_ecochk = I915_READ(GAM_ECOCHK);
 +              error->gac_eco = I915_READ(GAC_ECO_BITS);
 +      }
 +
 +      /* 4: Everything else */
 +      if (HAS_HW_CONTEXTS(dev))
 +              error->ccid = I915_READ(CCID);
 +
 +      if (HAS_PCH_SPLIT(dev))
 +              error->ier = I915_READ(DEIER) | I915_READ(GTIER);
 +      else {
 +              error->ier = I915_READ(IER);
 +              for_each_pipe(pipe)
 +                      error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 +      }
 +
 +      error->eir = I915_READ(EIR);
 +      error->pgtbl_er = I915_READ(PGTBL_ER);
 +
 +      i915_get_extra_instdone(dev, error->extra_instdone);
 +}
 +
 +static void i915_error_capture_msg(struct drm_device *dev,
 +                                 struct drm_i915_error_state *error,
 +                                 bool wedged,
 +                                 const char *error_msg)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 ecode;
 +      int ring_id = -1, len;
 +
 +      ecode = i915_error_generate_code(dev_priv, error, &ring_id);
 +
 +      len = scnprintf(error->error_msg, sizeof(error->error_msg),
 +                      "GPU HANG: ecode %d:0x%08x", ring_id, ecode);
 +
 +      if (ring_id != -1 && error->ring[ring_id].pid != -1)
 +              len += scnprintf(error->error_msg + len,
 +                               sizeof(error->error_msg) - len,
 +                               ", in %s [%d]",
 +                               error->ring[ring_id].comm,
 +                               error->ring[ring_id].pid);
 +
 +      scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
 +                ", reason: %s, action: %s",
 +                error_msg,
 +                wedged ? "reset" : "continue");
 +}
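
Put together, the three scnprintf() calls above yield a single line of the
form (all values illustrative):

	GPU HANG: ecode 0:0xf4e9fffe, in Xorg [1234], reason: ring hung, action: reset

where the ", in %s [%d]" part only appears when a hung ring with a known
submitting process was identified.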
 +
 +static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 +                                 struct drm_i915_error_state *error)
 +{
 +      error->reset_count = i915_reset_count(&dev_priv->gpu_error);
 +      error->suspend_count = dev_priv->suspend_count;
 +}
 +
  /**
   * i915_capture_error_state - capture an error record for later analysis
   * @dev: drm device
   * out a structure which becomes available in debugfs for user level tools
   * to pick up.
   */
 -void i915_capture_error_state(struct drm_device *dev)
 +void i915_capture_error_state(struct drm_device *dev, bool wedged,
 +                            const char *error_msg)
  {
 +      static bool warned;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
 -      int pipe;
 -
 -      spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
 -      error = dev_priv->gpu_error.first_error;
 -      spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 -      if (error)
 -              return;
  
        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
                return;
        }
  
 -      DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
 -               dev->primary->index);
 -      DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
 -      DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
 -      DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
 -      DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 -
        kref_init(&error->ref);
 -      error->eir = I915_READ(EIR);
 -      error->pgtbl_er = I915_READ(PGTBL_ER);
 -      if (HAS_HW_CONTEXTS(dev))
 -              error->ccid = I915_READ(CCID);
 -
 -      if (HAS_PCH_SPLIT(dev))
 -              error->ier = I915_READ(DEIER) | I915_READ(GTIER);
 -      else if (IS_VALLEYVIEW(dev))
 -              error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
 -      else if (IS_GEN2(dev))
 -              error->ier = I915_READ16(IER);
 -      else
 -              error->ier = I915_READ(IER);
 -
 -      if (INTEL_INFO(dev)->gen >= 6)
 -              error->derrmr = I915_READ(DERRMR);
 -
 -      if (IS_VALLEYVIEW(dev))
 -              error->forcewake = I915_READ(FORCEWAKE_VLV);
 -      else if (INTEL_INFO(dev)->gen >= 7)
 -              error->forcewake = I915_READ(FORCEWAKE_MT);
 -      else if (INTEL_INFO(dev)->gen == 6)
 -              error->forcewake = I915_READ(FORCEWAKE);
 -
 -      if (!HAS_PCH_SPLIT(dev))
 -              for_each_pipe(pipe)
 -                      error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 -
 -      if (INTEL_INFO(dev)->gen >= 6) {
 -              error->error = I915_READ(ERROR_GEN6);
 -              error->done_reg = I915_READ(DONE_REG);
 -      }
 -
 -      if (INTEL_INFO(dev)->gen == 7)
 -              error->err_int = I915_READ(GEN7_ERR_INT);
 -
 -      i915_get_extra_instdone(dev, error->extra_instdone);
  
 +      i915_capture_gen_state(dev_priv, error);
 +      i915_capture_reg_state(dev_priv, error);
        i915_gem_capture_buffers(dev_priv, error);
        i915_gem_record_fences(dev, error);
        i915_gem_record_rings(dev, error);
        error->overlay = intel_overlay_capture_error_state(dev);
        error->display = intel_display_capture_error_state(dev);
  
 +      i915_error_capture_msg(dev, error, wedged, error_msg);
 +      DRM_INFO("%s\n", error->error_msg);
 +
        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
        if (dev_priv->gpu_error.first_error == NULL) {
                dev_priv->gpu_error.first_error = error;
        }
        spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
  
 -      if (error)
 +      if (error) {
                i915_error_state_free(&error->ref);
 +              return;
 +      }
 +
 +      if (!warned) {
 +              DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
 +              DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
 +              DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
 +              DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 +              DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
 +              warned = true;
 +      }
  }
  
  void i915_error_state_get(struct drm_device *dev,
@@@ -232,18 -232,6 +232,18 @@@ static bool cpt_can_enable_serr_int(str
        return true;
  }
  
 +static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 reg = PIPESTAT(pipe);
 +      u32 pipestat = I915_READ(reg) & 0x7fff0000;
 +
 +      assert_spin_locked(&dev_priv->irq_lock);
 +
 +      I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
 +      POSTING_READ(reg);
 +}
 +
  static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
  {
@@@ -387,15 -375,16 +387,15 @@@ static void cpt_set_fifo_underrun_repor
   *
   * Returns the previous state of underrun reporting.
   */
 -bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 -                                         enum pipe pipe, bool enable)
 +bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 +                                           enum pipe pipe, bool enable)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      unsigned long flags;
        bool ret;
  
 -      spin_lock_irqsave(&dev_priv->irq_lock, flags);
 +      assert_spin_locked(&dev_priv->irq_lock);
  
        ret = !intel_crtc->cpu_fifo_underrun_disabled;
  
  
        intel_crtc->cpu_fifo_underrun_disabled = !enable;
  
 -      if (IS_GEN5(dev) || IS_GEN6(dev))
 +      if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
 +              i9xx_clear_fifo_underrun(dev, pipe);
 +      else if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
                broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
  
  done:
 +      return ret;
 +}
 +
 +bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 +                                         enum pipe pipe, bool enable)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      unsigned long flags;
 +      bool ret;
 +
 +      spin_lock_irqsave(&dev_priv->irq_lock, flags);
 +      ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 +
        return ret;
  }
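
The split follows the usual locked/unlocked kernel idiom: callers already
holding dev_priv->irq_lock use the double-underscore variant directly (it
merely asserts the lock), while everyone else goes through this wrapper,
which takes and drops the spinlock around the call.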
  
 +static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
 +                                                enum pipe pipe)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +
 +      return !intel_crtc->cpu_fifo_underrun_disabled;
 +}
 +
  /**
   * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
   * @dev: drm device
@@@ -494,103 -458,39 +494,103 @@@ done
  }
  
  
 -void
 -i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 +static void
 +__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 +                     u32 enable_mask, u32 status_mask)
  {
        u32 reg = PIPESTAT(pipe);
 -      u32 pipestat = I915_READ(reg) & 0x7fff0000;
 +      u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  
        assert_spin_locked(&dev_priv->irq_lock);
  
 -      if ((pipestat & mask) == mask)
 +      if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
 +                       status_mask & ~PIPESTAT_INT_STATUS_MASK))
                return;
  
 +      if ((pipestat & enable_mask) == enable_mask)
 +              return;
 +
 +      dev_priv->pipestat_irq_mask[pipe] |= status_mask;
 +
        /* Enable the interrupt, clear any pending status */
 -      pipestat |= mask | (mask >> 16);
 +      pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
  }
  
 -void
 -i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 +static void
 +__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 +                      u32 enable_mask, u32 status_mask)
  {
        u32 reg = PIPESTAT(pipe);
 -      u32 pipestat = I915_READ(reg) & 0x7fff0000;
 +      u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
  
        assert_spin_locked(&dev_priv->irq_lock);
  
 -      if ((pipestat & mask) == 0)
 +      if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
 +                       status_mask & ~PIPESTAT_INT_STATUS_MASK))
 +              return;
 +
 +      if ((pipestat & enable_mask) == 0)
                return;
  
 -      pipestat &= ~mask;
 +      dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
 +
 +      pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
  }
  
 +static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
 +{
 +      u32 enable_mask = status_mask << 16;
 +
 +      /*
 +       * On pipe A we don't support the PSR interrupt yet, on pipe B the
 +       * same bit is MBZ (must be zero).
 +       */
 +      if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
 +              return 0;
 +
 +      enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
 +                       SPRITE0_FLIP_DONE_INT_EN_VLV |
 +                       SPRITE1_FLIP_DONE_INT_EN_VLV);
 +      if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
 +              enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
 +      if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
 +              enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
 +
 +      return enable_mask;
 +}
 +
 +void
 +i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 +                   u32 status_mask)
 +{
 +      u32 enable_mask;
 +
 +      if (IS_VALLEYVIEW(dev_priv->dev))
 +              enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
 +                                                         status_mask);
 +      else
 +              enable_mask = status_mask << 16;
 +      __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
 +}
 +
 +void
 +i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 +                    u32 status_mask)
 +{
 +      u32 enable_mask;
 +
 +      if (IS_VALLEYVIEW(dev_priv->dev))
 +              enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
 +                                                         status_mask);
 +      else
 +              enable_mask = status_mask << 16;
 +      __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
 +}
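
The enable_mask/status_mask split leans on the PIPESTAT register layout: enable bits occupy the high 16 bits and status bits the low 16, so on most platforms the enable bit for a given status bit is simply status_mask << 16. vlv_get_pipestat_enable_mask() exists because the VLV sprite-flip enable bits break that symmetry. A toy check of the shift relationship, with an invented bit position and simplified stand-ins for the real field masks:

#include <assert.h>
#include <stdint.h>

#define STATUS_FIELD 0x0000ffffu  /* illustrative stand-ins for the   */
#define ENABLE_FIELD 0xffff0000u  /* PIPESTAT_INT_{STATUS,ENABLE}_MASK */

int main(void)
{
        uint32_t status_mask = 1u << 2;           /* made-up status bit */
        uint32_t enable_mask = status_mask << 16; /* its enable bit     */

        assert((status_mask & ~STATUS_FIELD) == 0);
        assert((enable_mask & ~ENABLE_FIELD) == 0);
        assert(enable_mask >> 16 == status_mask);
        return 0;
}

The WARN_ON_ONCE checks in the __i915_*_pipestat helpers above enforce exactly these field boundaries on every caller.
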
 +
  /**
   * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
   */
@@@ -604,10 -504,10 +604,10 @@@ static void i915_enable_asle_pipestat(s
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
  
 -      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
 +      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
 -                                   PIPE_LEGACY_BLC_EVENT_ENABLE);
 +                                   PIPE_LEGACY_BLC_EVENT_STATUS);
  
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
@@@ -667,8 -567,7 +667,7 @@@ static u32 i915_get_vblank_counter(stru
  
                vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
        } else {
-               enum transcoder cpu_transcoder =
-                       intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+               enum transcoder cpu_transcoder = (enum transcoder) pipe;
                u32 htotal;
  
                htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
@@@ -1016,11 -915,6 +1015,11 @@@ static void i915_hotplug_work_func(stru
                drm_kms_helper_hotplug_event(dev);
  }
  
 +static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
 +{
 +      del_timer_sync(&dev_priv->hotplug_reenable_timer);
 +}
 +
  static void ironlake_rps_change_irq_handler(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@@ -1072,43 -966,6 +1071,43 @@@ static void notify_ring(struct drm_devi
        i915_queue_hangcheck(dev);
  }
  
 +void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
 +                           u32 pm_iir, int new_delay)
 +{
 +      if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
 +              if (new_delay >= dev_priv->rps.max_delay) {
 +                      /* Mask UP THRESHOLD Interrupts */
 +                      I915_WRITE(GEN6_PMINTRMSK,
 +                                 I915_READ(GEN6_PMINTRMSK) |
 +                                 GEN6_PM_RP_UP_THRESHOLD);
 +                      dev_priv->rps.rp_up_masked = true;
 +              }
 +              if (dev_priv->rps.rp_down_masked) {
 +                      /* UnMask DOWN THRESHOLD Interrupts */
 +                      I915_WRITE(GEN6_PMINTRMSK,
 +                                 I915_READ(GEN6_PMINTRMSK) &
 +                                 ~GEN6_PM_RP_DOWN_THRESHOLD);
 +                      dev_priv->rps.rp_down_masked = false;
 +              }
 +      } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
 +              if (new_delay <= dev_priv->rps.min_delay) {
 +                      /* Mask DOWN THRESHOLD Interrupts */
 +                      I915_WRITE(GEN6_PMINTRMSK,
 +                                 I915_READ(GEN6_PMINTRMSK) |
 +                                 GEN6_PM_RP_DOWN_THRESHOLD);
 +                      dev_priv->rps.rp_down_masked = true;
 +              }
 +
 +              if (dev_priv->rps.rp_up_masked) {
 +                      /* UnMask UP THRESHOLD Interrupts */
 +                      I915_WRITE(GEN6_PMINTRMSK,
 +                                 I915_READ(GEN6_PMINTRMSK) &
 +                                 ~GEN6_PM_RP_UP_THRESHOLD);
 +                      dev_priv->rps.rp_up_masked = false;
 +              }
 +      }
 +}
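
gen6_set_pm_mask() implements a simple hysteresis: once the clamped frequency pins at max_delay there is no point taking further up-threshold interrupts, so they are masked, and symmetrically for min_delay and the down threshold; the rp_up_masked/rp_down_masked flags avoid redundant register writes. A stripped-down sketch of the idea, with a plain variable standing in for GEN6_PMINTRMSK and invented bit values:

#include <assert.h>
#include <stdint.h>

#define PM_UP_THRESHOLD   (1u << 5)  /* illustrative bit positions */
#define PM_DOWN_THRESHOLD (1u << 4)

static uint32_t pmintrmsk;           /* stands in for GEN6_PMINTRMSK */

static void update_pm_mask(int new_delay, int min_delay, int max_delay)
{
        if (new_delay >= max_delay)          /* pinned high: mask UP   */
                pmintrmsk |= PM_UP_THRESHOLD;
        else
                pmintrmsk &= ~PM_UP_THRESHOLD;

        if (new_delay <= min_delay)          /* pinned low: mask DOWN  */
                pmintrmsk |= PM_DOWN_THRESHOLD;
        else
                pmintrmsk &= ~PM_DOWN_THRESHOLD;
}

int main(void)
{
        update_pm_mask(10, 2, 10);
        assert(pmintrmsk & PM_UP_THRESHOLD);
        update_pm_mask(5, 2, 10);
        assert(pmintrmsk == 0);
        return 0;
}

Unlike the driver code, this sketch unconditionally recomputes both bits on every call instead of tracking the previously written state.
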
 +
  static void gen6_pm_rps_work(struct work_struct *work)
  {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
         */
        new_delay = clamp_t(int, new_delay,
                            dev_priv->rps.min_delay, dev_priv->rps.max_delay);
 +
 +      gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
  
        if (IS_VALLEYVIEW(dev_priv->dev))
@@@ -1309,8 -1164,8 +1308,8 @@@ static void snb_gt_irq_handler(struct d
        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
 -              DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
 -              i915_handle_error(dev, false);
 +              i915_handle_error(dev, false, "GT error interrupt 0x%08x",
 +                                gt_iir);
        }
  
        if (gt_iir & GT_PARITY_ERROR(dev))
@@@ -1381,9 -1236,6 +1380,9 @@@ static inline void intel_hpd_irq_handle
        if (!hotplug_trigger)
                return;
  
 +      DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 +                        hotplug_trigger);
 +
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
  
@@@ -1557,81 -1409,10 +1556,81 @@@ static void gen6_rps_irq_handler(struc
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
  
                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
 -                      DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
 -                      i915_handle_error(dev_priv->dev, false);
 +                      i915_handle_error(dev_priv->dev, false,
 +                                        "VEBOX CS error interrupt 0x%08x",
 +                                        pm_iir);
 +              }
 +      }
 +}
 +
 +static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 pipe_stats[I915_MAX_PIPES] = { };
 +      int pipe;
 +
 +      spin_lock(&dev_priv->irq_lock);
 +      for_each_pipe(pipe) {
 +              int reg;
 +              u32 mask, iir_bit = 0;
 +
 +              /*
 +               * PIPESTAT bits get signalled even when the interrupt is
 +               * disabled with the mask bits, and some of the status bits do
 +               * not generate interrupts at all (like the underrun bit). Hence
 +               * we need to be careful that we only handle what we want to
 +               * handle.
 +               */
 +              mask = 0;
 +              if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
 +                      mask |= PIPE_FIFO_UNDERRUN_STATUS;
 +
 +              switch (pipe) {
 +              case PIPE_A:
 +                      iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
 +                      break;
 +              case PIPE_B:
 +                      iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 +                      break;
 +              }
 +              if (iir & iir_bit)
 +                      mask |= dev_priv->pipestat_irq_mask[pipe];
 +
 +              if (!mask)
 +                      continue;
 +
 +              reg = PIPESTAT(pipe);
 +              mask |= PIPESTAT_INT_ENABLE_MASK;
 +              pipe_stats[pipe] = I915_READ(reg) & mask;
 +
 +              /*
 +               * Clear the PIPE*STAT regs before the IIR
 +               */
 +              if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
 +                                      PIPESTAT_INT_STATUS_MASK))
 +                      I915_WRITE(reg, pipe_stats[pipe]);
 +      }
 +      spin_unlock(&dev_priv->irq_lock);
 +
 +      for_each_pipe(pipe) {
 +              if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 +                      drm_handle_vblank(dev, pipe);
 +
 +              if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
 +                      intel_prepare_page_flip(dev, pipe);
 +                      intel_finish_page_flip(dev, pipe);
                }
 +
 +              if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 +                      i9xx_pipe_crc_irq_handler(dev, pipe);
 +
 +              if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
 +                  intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
 +                      DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
        }
 +
 +      if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 +              gmbus_irq_handler(dev);
  }
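
The new handler has a deliberate two-phase shape: snapshot and acknowledge the per-pipe status under irq_lock, honoring the "clear PIPESTAT before IIR" ordering the comment calls out, then act on the snapshot with the lock dropped. A generic user-space sketch of that shape, with a mutex and a plain array standing in for the spinlock and the PIPESTAT registers:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PIPES 2

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fake_pipestat[NUM_PIPES];  /* stands in for PIPESTAT */

static void pipestat_irq_handler(void)
{
        uint32_t stats[NUM_PIPES] = { 0 };
        int pipe;

        /* phase 1: snapshot and ack while holding the lock */
        pthread_mutex_lock(&irq_lock);
        for (pipe = 0; pipe < NUM_PIPES; pipe++) {
                stats[pipe] = fake_pipestat[pipe];
                fake_pipestat[pipe] = 0;   /* "write back to clear" */
        }
        pthread_mutex_unlock(&irq_lock);

        /* phase 2: process the snapshot without the lock held */
        for (pipe = 0; pipe < NUM_PIPES; pipe++)
                if (stats[pipe])
                        printf("pipe %d events: 0x%08x\n",
                               pipe, stats[pipe]);
}

int main(void)
{
        fake_pipestat[1] = 0x4;
        pipestat_irq_handler();
        return 0;
}

Keeping phase 2 outside the lock matters because the per-event work (page-flip completion, underrun reporting) can itself take other locks.
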
  
  static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
 -      unsigned long irqflags;
 -      int pipe;
 -      u32 pipe_stats[I915_MAX_PIPES];
 -
 -      atomic_inc(&dev_priv->irq_received);
  
        while (true) {
                iir = I915_READ(VLV_IIR);
  
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
  
 -              spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 -              for_each_pipe(pipe) {
 -                      int reg = PIPESTAT(pipe);
 -                      pipe_stats[pipe] = I915_READ(reg);
 -
 -                      /*
 -                       * Clear the PIPE*STAT regs before the IIR
 -                       */
 -                      if (pipe_stats[pipe] & 0x8000ffff) {
 -                              if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 -                                      DRM_DEBUG_DRIVER("pipe %c underrun\n",
 -                                                       pipe_name(pipe));
 -                              I915_WRITE(reg, pipe_stats[pipe]);
 -                      }
 -              }
 -              spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 -
 -              for_each_pipe(pipe) {
 -                      if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 -                              drm_handle_vblank(dev, pipe);
 -
 -                      if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
 -                              intel_prepare_page_flip(dev, pipe);
 -                              intel_finish_page_flip(dev, pipe);
 -                      }
 -
 -                      if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 -                              i9xx_pipe_crc_irq_handler(dev, pipe);
 -              }
 +              valleyview_pipestat_irq_handler(dev, iir);
  
                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  
 -                      DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 -                                       hotplug_status);
 -
                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
  
                        if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                        I915_READ(PORT_HOTPLUG_STAT);
                }
  
 -              if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 -                      gmbus_irq_handler(dev);
  
                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
@@@ -1727,12 -1546,12 +1726,12 @@@ static void ibx_irq_handler(struct drm_
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
 -                      DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
 +                      DRM_ERROR("PCH transcoder A FIFO underrun\n");
  
        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
 -                      DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
 +                      DRM_ERROR("PCH transcoder B FIFO underrun\n");
  }
  
  static void ivb_err_int_handler(struct drm_device *dev)
                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
 -                              DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
 -                                               pipe_name(pipe));
 +                              DRM_ERROR("Pipe %c FIFO underrun\n",
 +                                        pipe_name(pipe));
                }
  
                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
@@@ -1774,17 -1593,17 +1773,17 @@@ static void cpt_serr_int_handler(struc
        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
 -                      DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
 +                      DRM_ERROR("PCH transcoder A FIFO underrun\n");
  
        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
 -                      DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
 +                      DRM_ERROR("PCH transcoder B FIFO underrun\n");
  
        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
 -                      DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
 +                      DRM_ERROR("PCH transcoder C FIFO underrun\n");
  
        I915_WRITE(SERR_INT, serr_int);
  }
@@@ -1846,8 -1665,8 +1845,8 @@@ static void ilk_display_irq_handler(str
  
                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
 -                              DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
 -                                               pipe_name(pipe));
 +                              DRM_ERROR("Pipe %c FIFO underrun\n",
 +                                        pipe_name(pipe));
  
                if (de_iir & DE_PIPE_CRC_DONE(pipe))
                        i9xx_pipe_crc_irq_handler(dev, pipe);
  static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      enum pipe i;
 +      enum pipe pipe;
  
        if (de_iir & DE_ERR_INT_IVB)
                ivb_err_int_handler(dev);
        if (de_iir & DE_GSE_IVB)
                intel_opregion_asle_intr(dev);
  
 -      for_each_pipe(i) {
 -              if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
 -                      drm_handle_vblank(dev, i);
 +      for_each_pipe(pipe) {
 +              if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
 +                      drm_handle_vblank(dev, pipe);
  
                /* plane/pipes map 1:1 on ilk+ */
 -              if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
 -                      intel_prepare_page_flip(dev, i);
 -                      intel_finish_page_flip_plane(dev, i);
 +              if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
 +                      intel_prepare_page_flip(dev, pipe);
 +                      intel_finish_page_flip_plane(dev, pipe);
                }
        }
  
@@@ -1919,6 -1738,8 +1918,6 @@@ static irqreturn_t ironlake_irq_handler
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
  
 -      atomic_inc(&dev_priv->irq_received);
 -
        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);
@@@ -1987,6 -1808,8 +1986,6 @@@ static irqreturn_t gen8_irq_handler(in
        uint32_t tmp = 0;
        enum pipe pipe;
  
 -      atomic_inc(&dev_priv->irq_received);
 -
        master_ctl = I915_READ(GEN8_MASTER_IRQ);
        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
        if (!master_ctl)
                if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
 -                              DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
 -                                               pipe_name(pipe));
 +                              DRM_ERROR("Pipe %c FIFO underrun\n",
 +                                        pipe_name(pipe));
                }
  
                if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
@@@ -2291,18 -2114,11 +2290,18 @@@ static void i915_report_and_clear_eir(s
   * so userspace knows something bad happened (should trigger collection
   * of a ring dump etc.).
   */
 -void i915_handle_error(struct drm_device *dev, bool wedged)
 +void i915_handle_error(struct drm_device *dev, bool wedged,
 +                     const char *fmt, ...)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 +      va_list args;
 +      char error_msg[80];
 +
 +      va_start(args, fmt);
 +      vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 +      va_end(args);
  
 -      i915_capture_error_state(dev);
 +      i915_capture_error_state(dev, wedged, error_msg);
        i915_report_and_clear_eir(dev);
  
        if (wedged) {
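
i915_handle_error() now formats a caller-supplied printf-style message into a fixed buffer (vscnprintf in the kernel) so the reason can be attached to the captured error state instead of going to a separate DRM_ERROR. A user-space sketch of the same varargs pattern, with vsnprintf standing in and a hypothetical report function:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

static void report_gpu_error(bool wedged, const char *fmt, ...)
{
        char error_msg[80];
        va_list args;

        va_start(args, fmt);
        vsnprintf(error_msg, sizeof(error_msg), fmt, args);
        va_end(args);

        fprintf(stderr, "GPU error%s: %s\n",
                wedged ? " (wedged)" : "", error_msg);
}

int main(void)
{
        report_gpu_error(false, "GT error interrupt 0x%08x", 0xdeadbeefu);
        return 0;
}

This is why the call sites below change from DRM_ERROR(...) plus i915_handle_error(dev, false) to a single i915_handle_error(dev, false, "...", ...) call.
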
@@@ -2394,13 -2210,13 +2393,13 @@@ static int i915_enable_vblank(struct dr
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, pipe,
 -                                   PIPE_START_VBLANK_INTERRUPT_ENABLE);
 +                                   PIPE_START_VBLANK_INTERRUPT_STATUS);
        else
                i915_enable_pipestat(dev_priv, pipe,
 -                                   PIPE_VBLANK_INTERRUPT_ENABLE);
 +                                   PIPE_VBLANK_INTERRUPT_STATUS);
  
        /* maintain vblank delivery even in deep C-states */
 -      if (dev_priv->info->gen == 3)
 +      if (INTEL_INFO(dev)->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
@@@ -2428,13 -2244,20 +2427,13 @@@ static int valleyview_enable_vblank(str
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
 -      u32 imr;
  
        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 -      imr = I915_READ(VLV_IMR);
 -      if (pipe == PIPE_A)
 -              imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 -      else
 -              imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 -      I915_WRITE(VLV_IMR, imr);
        i915_enable_pipestat(dev_priv, pipe,
 -                           PIPE_START_VBLANK_INTERRUPT_ENABLE);
 +                           PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
        return 0;
@@@ -2465,12 -2288,12 +2464,12 @@@ static void i915_disable_vblank(struct 
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 -      if (dev_priv->info->gen == 3)
 +      if (INTEL_INFO(dev)->gen == 3)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
  
        i915_disable_pipestat(dev_priv, pipe,
 -                            PIPE_VBLANK_INTERRUPT_ENABLE |
 -                            PIPE_START_VBLANK_INTERRUPT_ENABLE);
 +                            PIPE_VBLANK_INTERRUPT_STATUS |
 +                            PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
@@@ -2490,10 -2313,17 +2489,10 @@@ static void valleyview_disable_vblank(s
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
 -      u32 imr;
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
 -                            PIPE_START_VBLANK_INTERRUPT_ENABLE);
 -      imr = I915_READ(VLV_IMR);
 -      if (pipe == PIPE_A)
 -              imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 -      else
 -              imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 -      I915_WRITE(VLV_IMR, imr);
 +                            PIPE_START_VBLANK_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
@@@ -2605,9 -2435,9 +2604,9 @@@ ring_stuck(struct intel_ring_buffer *ri
         */
        tmp = I915_READ_CTL(ring);
        if (tmp & RING_WAIT) {
 -              DRM_ERROR("Kicking stuck wait on %s\n",
 -                        ring->name);
 -              i915_handle_error(dev, false);
 +              i915_handle_error(dev, false,
 +                                "Kicking stuck wait on %s",
 +                                ring->name);
                I915_WRITE_CTL(ring, tmp);
                return HANGCHECK_KICK;
        }
                default:
                        return HANGCHECK_HUNG;
                case 1:
 -                      DRM_ERROR("Kicking stuck semaphore on %s\n",
 -                                ring->name);
 -                      i915_handle_error(dev, false);
 +                      i915_handle_error(dev, false,
 +                                        "Kicking stuck semaphore on %s",
 +                                        ring->name);
                        I915_WRITE_CTL(ring, tmp);
                        return HANGCHECK_KICK;
                case 0:
@@@ -2649,8 -2479,9 +2648,8 @@@ static void i915_hangcheck_elapsed(unsi
  #define BUSY 1
  #define KICK 5
  #define HUNG 20
 -#define FIRE 30
  
 -      if (!i915_enable_hangcheck)
 +      if (!i915.enable_hangcheck)
                return;
  
        for_each_ring(ring, dev_priv, i) {
        }
  
        for_each_ring(ring, dev_priv, i) {
 -              if (ring->hangcheck.score > FIRE) {
 +              if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
                        DRM_INFO("%s on %s\n",
                                 stuck[i] ? "stuck" : "no progress",
                                 ring->name);
        }
  
        if (rings_hung)
 -              return i915_handle_error(dev, true);
 +              return i915_handle_error(dev, true, "Ring hung");
  
        if (busy_count)
                /* Reset timer in case chip hangs without another request
  void i915_queue_hangcheck(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      if (!i915_enable_hangcheck)
 +      if (!i915.enable_hangcheck)
                return;
  
        mod_timer(&dev_priv->gpu_error.hangcheck_timer,
@@@ -2801,6 -2632,8 +2800,6 @@@ static void ironlake_irq_preinstall(str
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  
 -      atomic_set(&dev_priv->irq_received, 0);
 -
        I915_WRITE(HWSTAM, 0xeffe);
  
        I915_WRITE(DEIMR, 0xffffffff);
@@@ -2817,6 -2650,8 +2816,6 @@@ static void valleyview_irq_preinstall(s
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
 -      atomic_set(&dev_priv->irq_received, 0);
 -
        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@@ -2846,6 -2681,8 +2845,6 @@@ static void gen8_irq_preinstall(struct 
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
  
 -      atomic_set(&dev_priv->irq_received, 0);
 -
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
  
@@@ -3037,113 -2874,44 +3036,113 @@@ static int ironlake_irq_postinstall(str
        return 0;
  }
  
 +static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
 +{
 +      u32 pipestat_mask;
 +      u32 iir_mask;
 +
 +      pipestat_mask = PIPESTAT_INT_STATUS_MASK |
 +                      PIPE_FIFO_UNDERRUN_STATUS;
 +
 +      I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
 +      I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
 +      POSTING_READ(PIPESTAT(PIPE_A));
 +
 +      pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
 +                      PIPE_CRC_DONE_INTERRUPT_STATUS;
 +
 +      i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
 +                                             PIPE_GMBUS_INTERRUPT_STATUS);
 +      i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
 +
 +      iir_mask = I915_DISPLAY_PORT_INTERRUPT |
 +                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
 +                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 +      dev_priv->irq_mask &= ~iir_mask;
 +
 +      I915_WRITE(VLV_IIR, iir_mask);
 +      I915_WRITE(VLV_IIR, iir_mask);
 +      I915_WRITE(VLV_IMR, dev_priv->irq_mask);
 +      I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
 +      POSTING_READ(VLV_IER);
 +}
 +
 +static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
 +{
 +      u32 pipestat_mask;
 +      u32 iir_mask;
 +
 +      iir_mask = I915_DISPLAY_PORT_INTERRUPT |
 +                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
 +                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
 +
 +      dev_priv->irq_mask |= iir_mask;
 +      I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
 +      I915_WRITE(VLV_IMR, dev_priv->irq_mask);
 +      I915_WRITE(VLV_IIR, iir_mask);
 +      I915_WRITE(VLV_IIR, iir_mask);
 +      POSTING_READ(VLV_IIR);
 +
 +      pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
 +                      PIPE_CRC_DONE_INTERRUPT_STATUS;
 +
 +      i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
 +                                              PIPE_GMBUS_INTERRUPT_STATUS);
 +      i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
 +
 +      pipestat_mask = PIPESTAT_INT_STATUS_MASK |
 +                      PIPE_FIFO_UNDERRUN_STATUS;
 +      I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
 +      I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
 +      POSTING_READ(PIPESTAT(PIPE_A));
 +}
 +
 +void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
 +{
 +      assert_spin_locked(&dev_priv->irq_lock);
 +
 +      if (dev_priv->display_irqs_enabled)
 +              return;
 +
 +      dev_priv->display_irqs_enabled = true;
 +
 +      if (dev_priv->dev->irq_enabled)
 +              valleyview_display_irqs_install(dev_priv);
 +}
 +
 +void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 +{
 +      assert_spin_locked(&dev_priv->irq_lock);
 +
 +      if (!dev_priv->display_irqs_enabled)
 +              return;
 +
 +      dev_priv->display_irqs_enabled = false;
 +
 +      if (dev_priv->dev->irq_enabled)
 +              valleyview_display_irqs_uninstall(dev_priv);
 +}
 +
  static int valleyview_irq_postinstall(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 -      u32 enable_mask;
 -      u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
 -              PIPE_CRC_DONE_ENABLE;
        unsigned long irqflags;
  
 -      enable_mask = I915_DISPLAY_PORT_INTERRUPT;
 -      enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
 -              I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
 -              I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
 -              I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 -
 -      /*
 -       *Leave vblank interrupts masked initially.  enable/disable will
 -       * toggle them based on usage.
 -       */
 -      dev_priv->irq_mask = (~enable_mask) |
 -              I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
 -              I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 +      dev_priv->irq_mask = ~0;
  
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
  
        I915_WRITE(VLV_IMR, dev_priv->irq_mask);
 -      I915_WRITE(VLV_IER, enable_mask);
 +      I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
        I915_WRITE(VLV_IIR, 0xffffffff);
 -      I915_WRITE(PIPESTAT(0), 0xffff);
 -      I915_WRITE(PIPESTAT(1), 0xffff);
        POSTING_READ(VLV_IER);
  
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 -      i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
 -      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
 -      i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
 +      if (dev_priv->display_irqs_enabled)
 +              valleyview_display_irqs_install(dev_priv);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
        I915_WRITE(VLV_IIR, 0xffffffff);
@@@ -3239,6 -3007,8 +3238,6 @@@ static void gen8_irq_uninstall(struct d
        if (!dev_priv)
                return;
  
 -      atomic_set(&dev_priv->irq_received, 0);
 -
        I915_WRITE(GEN8_MASTER_IRQ, 0);
  
  #define GEN8_IRQ_FINI_NDX(type, which) do { \
  static void valleyview_irq_uninstall(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 +      unsigned long irqflags;
        int pipe;
  
        if (!dev_priv)
                return;
  
 -      del_timer_sync(&dev_priv->hotplug_reenable_timer);
 +      intel_hpd_irq_uninstall(dev_priv);
  
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 -      for_each_pipe(pipe)
 -              I915_WRITE(PIPESTAT(pipe), 0xffff);
 +
 +      spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 +      if (dev_priv->display_irqs_enabled)
 +              valleyview_display_irqs_uninstall(dev_priv);
 +      spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 +
 +      dev_priv->irq_mask = 0;
 +
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IMR, 0xffffffff);
        I915_WRITE(VLV_IER, 0x0);
@@@ -3309,7 -3072,7 +3308,7 @@@ static void ironlake_irq_uninstall(stru
        if (!dev_priv)
                return;
  
 -      del_timer_sync(&dev_priv->hotplug_reenable_timer);
 +      intel_hpd_irq_uninstall(dev_priv);
  
        I915_WRITE(HWSTAM, 0xffffffff);
  
@@@ -3338,6 -3101,8 +3337,6 @@@ static void i8xx_irq_preinstall(struct 
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
 -      atomic_set(&dev_priv->irq_received, 0);
 -
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE16(IMR, 0xffff);
@@@ -3372,8 -3137,8 +3371,8 @@@ static int i8xx_irq_postinstall(struct 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 -      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
 -      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
 +      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
 +      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
        return 0;
@@@ -3422,6 -3187,8 +3421,6 @@@ static irqreturn_t i8xx_irq_handler(in
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  
 -      atomic_inc(&dev_priv->irq_received);
 -
        iir = I915_READ16(IIR);
        if (iir == 0)
                return IRQ_NONE;
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 -                      i915_handle_error(dev, false);
 +                      i915_handle_error(dev, false,
 +                                        "Command parser error, iir 0x%08x",
 +                                        iir);
  
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
 -                      if (pipe_stats[pipe] & 0x8000ffff) {
 -                              if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 -                                      DRM_DEBUG_DRIVER("pipe %c underrun\n",
 -                                                       pipe_name(pipe));
 +                      if (pipe_stats[pipe] & 0x8000ffff)
                                I915_WRITE(reg, pipe_stats[pipe]);
 -                      }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
  
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
 +
 +                      if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
 +                          intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
 +                              DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
                }
  
                iir = new_iir;
@@@ -3501,6 -3266,8 +3500,6 @@@ static void i915_irq_preinstall(struct 
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
 -      atomic_set(&dev_priv->irq_received, 0);
 -
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@@ -3557,8 -3324,8 +3556,8 @@@ static int i915_irq_postinstall(struct 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 -      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
 -      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
 +      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
 +      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
        return 0;
@@@ -3606,6 -3373,8 +3605,6 @@@ static irqreturn_t i915_irq_handler(in
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        int pipe, ret = IRQ_NONE;
  
 -      atomic_inc(&dev_priv->irq_received);
 -
        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 -                      i915_handle_error(dev, false);
 +                      i915_handle_error(dev, false,
 +                                        "Command parser error, iir 0x%08x",
 +                                        iir);
  
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
  
                        /* Clear the PIPE*STAT regs before the IIR */
                        if (pipe_stats[pipe] & 0x8000ffff) {
 -                              if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 -                                      DRM_DEBUG_DRIVER("pipe %c underrun\n",
 -                                                       pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  
 -                      DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 -                                hotplug_status);
 -
                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
  
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
 +
 +                      if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
 +                          intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
 +                              DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
                }
  
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@@ -3707,7 -3476,7 +3706,7 @@@ static void i915_irq_uninstall(struct d
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
 -      del_timer_sync(&dev_priv->hotplug_reenable_timer);
 +      intel_hpd_irq_uninstall(dev_priv);
  
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
@@@ -3731,6 -3500,8 +3730,6 @@@ static void i965_irq_preinstall(struct 
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
 -      atomic_set(&dev_priv->irq_received, 0);
 -
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  
@@@ -3769,9 -3540,9 +3768,9 @@@ static int i965_irq_postinstall(struct 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 -      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
 -      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
 -      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
 +      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
 +      i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
 +      i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
        /*
@@@ -3839,17 -3610,21 +3838,17 @@@ static irqreturn_t i965_irq_handler(in
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
 -      int irq_received;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  
 -      atomic_inc(&dev_priv->irq_received);
 -
        iir = I915_READ(IIR);
  
        for (;;) {
 +              bool irq_received = (iir & ~flip_mask) != 0;
                bool blc_event = false;
  
 -              irq_received = (iir & ~flip_mask) != 0;
 -
                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 -                      i915_handle_error(dev, false);
 +                      i915_handle_error(dev, false,
 +                                        "Command parser error, iir 0x%08x",
 +                                        iir);
  
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
 -                              if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
 -                                      DRM_DEBUG_DRIVER("pipe %c underrun\n",
 -                                                       pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
 -                              irq_received = 1;
 +                              irq_received = true;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
                                                                  HOTPLUG_INT_STATUS_G4X :
                                                                  HOTPLUG_INT_STATUS_I915);
  
 -                      DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 -                                hotplug_status);
 -
                        intel_hpd_irq_handler(dev, hotplug_trigger,
                                              IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
  
  
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
 -              }
  
 +                      if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
 +                          intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
 +                              DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
 +              }
  
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);
@@@ -3959,7 -3735,7 +3958,7 @@@ static void i965_irq_uninstall(struct d
        if (!dev_priv)
                return;
  
 -      del_timer_sync(&dev_priv->hotplug_reenable_timer);
 +      intel_hpd_irq_uninstall(dev_priv);
  
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        I915_WRITE(IIR, I915_READ(IIR));
  }
  
 -static void i915_reenable_hotplug_timer_func(unsigned long data)
 +static void intel_hpd_irq_reenable(unsigned long data)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
        struct drm_device *dev = dev_priv->dev;
@@@ -4023,7 -3799,7 +4022,7 @@@ void intel_irq_init(struct drm_device *
        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
 -      setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
 +      setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
                    (unsigned long) dev_priv);
  
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
@@@ -51,10 -51,7 +51,10 @@@ static void ironlake_pch_clock_get(stru
  
  static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
                          int x, int y, struct drm_framebuffer *old_fb);
 -
 +static int intel_framebuffer_init(struct drm_device *dev,
 +                                struct intel_framebuffer *ifb,
 +                                struct drm_mode_fb_cmd2 *mode_cmd,
 +                                struct drm_i915_gem_object *obj);
  
  typedef struct {
        int     min, max;
@@@ -1033,7 -1030,7 +1033,7 @@@ static void assert_fdi_tx_pll_enabled(s
        u32 val;
  
        /* ILK FDI PLL is always enabled */
 -      if (dev_priv->info->gen == 5)
 +      if (INTEL_INFO(dev_priv->dev)->gen == 5)
                return;
  
        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@@ -1095,12 -1092,12 +1095,12 @@@ static void assert_cursor(struct drm_i9
        struct drm_device *dev = dev_priv->dev;
        bool cur_state;
  
-       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-               cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
-       else if (IS_845G(dev) || IS_I865G(dev))
+       if (IS_845G(dev) || IS_I865G(dev))
                cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
-       else
+       else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
                cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+       else
+               cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
  
        WARN(cur_state != state,
             "cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@@ -1122,7 -1119,7 +1122,7 @@@ void assert_pipe(struct drm_i915_privat
        if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
                state = true;
  
 -      if (!intel_display_power_enabled(dev_priv->dev,
 +      if (!intel_display_power_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
                cur_state = false;
        } else {
@@@ -1188,16 -1185,16 +1188,16 @@@ static void assert_sprites_disabled(str
                                    enum pipe pipe)
  {
        struct drm_device *dev = dev_priv->dev;
 -      int reg, i;
 +      int reg, sprite;
        u32 val;
  
        if (IS_VALLEYVIEW(dev)) {
 -              for (i = 0; i < dev_priv->num_plane; i++) {
 -                      reg = SPCNTR(pipe, i);
 +              for_each_sprite(pipe, sprite) {
 +                      reg = SPCNTR(pipe, sprite);
                        val = I915_READ(reg);
                        WARN((val & SP_ENABLE),
                             "sprite %c assertion failure, should be off on pipe %c but is still active\n",
 -                           sprite_name(pipe, i), pipe_name(pipe));
 +                           sprite_name(pipe, sprite), pipe_name(pipe));
                }
        } else if (INTEL_INFO(dev)->gen >= 7) {
                reg = SPRCTL(pipe);
@@@ -1446,7 -1443,7 +1446,7 @@@ static void i9xx_enable_pll(struct inte
        assert_pipe_disabled(dev_priv, crtc->pipe);
  
        /* No really, not for ILK+ */
 -      BUG_ON(dev_priv->info->gen >= 5);
 +      BUG_ON(INTEL_INFO(dev)->gen >= 5);
  
        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev) && !IS_I830(dev))
@@@ -1552,12 -1549,11 +1552,12 @@@ void vlv_wait_port_ready(struct drm_i91
   */
  static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
  {
 -      struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
  
        /* PCH PLLs only available on ILK, SNB and IVB */
 -      BUG_ON(dev_priv->info->gen < 5);
 +      BUG_ON(INTEL_INFO(dev)->gen < 5);
        if (WARN_ON(pll == NULL))
                return;
  
  
  static void intel_disable_shared_dpll(struct intel_crtc *crtc)
  {
 -      struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
  
        /* PCH only available on ILK+ */
 -      BUG_ON(dev_priv->info->gen < 5);
 +      BUG_ON(INTEL_INFO(dev)->gen < 5);
        if (WARN_ON(pll == NULL))
               return;
  
@@@ -1622,7 -1617,7 +1622,7 @@@ static void ironlake_enable_pch_transco
        uint32_t reg, val, pipeconf_val;
  
        /* PCH only available on ILK+ */
 -      BUG_ON(dev_priv->info->gen < 5);
 +      BUG_ON(INTEL_INFO(dev)->gen < 5);
  
        /* Make sure PCH DPLL is enabled */
        assert_shared_dpll_enabled(dev_priv,
@@@ -1675,7 -1670,7 +1675,7 @@@ static void lpt_enable_pch_transcoder(s
        u32 val, pipeconf_val;
  
        /* PCH only available on ILK+ */
 -      BUG_ON(dev_priv->info->gen < 5);
 +      BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
  
        /* FDI must be feeding us bits for PCH ports */
        assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
@@@ -1749,16 -1744,21 +1749,16 @@@ static void lpt_disable_pch_transcoder(
  
  /**
   * intel_enable_pipe - enable a pipe, asserting requirements
 - * @dev_priv: i915 private structure
 - * @pipe: pipe to enable
 - * @pch_port: on ILK+, is this pipe driving a PCH port or not
 + * @crtc: crtc responsible for the pipe
   *
 - * Enable @pipe, making sure that various hardware specific requirements
 + * Enable @crtc's pipe, making sure that various hardware specific requirements
   * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 - *
 - * @pipe should be %PIPE_A or %PIPE_B.
 - *
 - * Will wait until the pipe is actually running (i.e. first vblank) before
 - * returning.
   */
 -static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 -                            bool pch_port, bool dsi)
 +static void intel_enable_pipe(struct intel_crtc *crtc)
  {
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
        enum pipe pch_transcoder;
         * need the check.
         */
        if (!HAS_PCH_SPLIT(dev_priv->dev))
 -              if (dsi)
 +              if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
        else {
 -              if (pch_port) {
 +              if (crtc->config.has_pch_encoder) {
                        /* if driving the PCH, we need FDI enabled */
                        assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
                        assert_fdi_tx_pll_enabled(dev_priv,
  
        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
 -      if (val & PIPECONF_ENABLE)
 +      if (val & PIPECONF_ENABLE) {
 +              WARN_ON(!(pipe == PIPE_A &&
 +                        dev_priv->quirks & QUIRK_PIPEA_FORCE));
                return;
 +      }
  
        I915_WRITE(reg, val | PIPECONF_ENABLE);
 -      intel_wait_for_vblank(dev_priv->dev, pipe);
 +      POSTING_READ(reg);
 +
 +      /*
 +       * There's no guarantee the pipe will really start running now. It
 +       * depends on the Gen, the output type and the relative order between
 +       * pipe and plane enabling. Avoid waiting on HSW+ since it's not
 +       * necessary.
 +       * TODO: audit the previous gens.
 +       */
 +      if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
 +              intel_wait_for_vblank(dev_priv->dev, pipe);
  }
  
  /**
@@@ -1864,8 -1851,7 +1864,8 @@@ static void intel_disable_pipe(struct d
  void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
                               enum plane plane)
  {
 -      u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
 +      struct drm_device *dev = dev_priv->dev;
 +      u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
  
        I915_WRITE(reg, I915_READ(reg));
        POSTING_READ(reg);
@@@ -1943,14 -1929,6 +1943,14 @@@ static bool need_vtd_wa(struct drm_devi
        return false;
  }
  
 +static int intel_align_height(struct drm_device *dev, int height, bool tiled)
 +{
 +      int tile_height;
 +
 +      tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
 +      return ALIGN(height, tile_height);
 +}
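
intel_align_height() rounds the framebuffer height up to the tile height: 1 row for linear, 8 rows for tiled on gen3+, 16 rows on gen2. A worked example using the standard power-of-two ALIGN idiom (the macro here mirrors the kernel's, redefined so the snippet is self-contained):

#include <assert.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        assert(ALIGN(1080, 8) == 1080);  /* already a multiple of 8  */
        assert(ALIGN(1050, 8) == 1056);  /* rounds up to next tile   */
        assert(ALIGN(1080, 1) == 1080);  /* untiled: no rounding     */
        return 0;
}
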
 +
  int
  intel_pin_and_fence_fb_obj(struct drm_device *dev,
                           struct drm_i915_gem_object *obj,
@@@ -2047,111 -2025,6 +2047,111 @@@ unsigned long intel_gen4_compute_page_o
        }
  }
  
 +int intel_format_to_fourcc(int format)
 +{
 +      switch (format) {
 +      case DISPPLANE_8BPP:
 +              return DRM_FORMAT_C8;
 +      case DISPPLANE_BGRX555:
 +              return DRM_FORMAT_XRGB1555;
 +      case DISPPLANE_BGRX565:
 +              return DRM_FORMAT_RGB565;
 +      default:
 +      case DISPPLANE_BGRX888:
 +              return DRM_FORMAT_XRGB8888;
 +      case DISPPLANE_RGBX888:
 +              return DRM_FORMAT_XBGR8888;
 +      case DISPPLANE_BGRX101010:
 +              return DRM_FORMAT_XRGB2101010;
 +      case DISPPLANE_RGBX101010:
 +              return DRM_FORMAT_XBGR2101010;
 +      }
 +}
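
Note the placement of default: in the middle of the switch above: any unrecognized DISPPLANE value falls into the DISPPLANE_BGRX888 case and is reported as DRM_FORMAT_XRGB8888. A tiny standalone demo of the same construct, with made-up values:

#include <assert.h>

static int classify(int v)
{
        switch (v) {
        case 1:
                return 10;
        default:
        case 2:
                return 20;  /* both "2" and anything unknown land here */
        }
}

int main(void)
{
        assert(classify(1) == 10);
        assert(classify(2) == 20);
        assert(classify(999) == 20);
        return 0;
}

Picking XRGB8888 as the catch-all gives the readout path a sane fallback for plane formats the switch does not enumerate.
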
 +
 +static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
 +                                struct intel_plane_config *plane_config)
 +{
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_gem_object *obj = NULL;
 +      struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 +      u32 base = plane_config->base;
 +
 +      if (plane_config->size == 0)
 +              return false;
 +
 +      obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
 +                                                           plane_config->size);
 +      if (!obj)
 +              return false;
 +
 +      if (plane_config->tiled) {
 +              obj->tiling_mode = I915_TILING_X;
 +              obj->stride = crtc->base.fb->pitches[0];
 +      }
 +
 +      mode_cmd.pixel_format = crtc->base.fb->pixel_format;
 +      mode_cmd.width = crtc->base.fb->width;
 +      mode_cmd.height = crtc->base.fb->height;
 +      mode_cmd.pitches[0] = crtc->base.fb->pitches[0];
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.fb),
 +                                 &mode_cmd, obj)) {
 +              DRM_DEBUG_KMS("intel fb init failed\n");
 +              goto out_unref_obj;
 +      }
 +
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      DRM_DEBUG_KMS("plane fb obj %p\n", obj);
 +      return true;
 +
 +out_unref_obj:
 +      drm_gem_object_unreference(&obj->base);
 +      mutex_unlock(&dev->struct_mutex);
 +      return false;
 +}
 +
 +static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
 +                               struct intel_plane_config *plane_config)
 +{
 +      struct drm_device *dev = intel_crtc->base.dev;
 +      struct drm_crtc *c;
 +      struct intel_crtc *i;
 +      struct intel_framebuffer *fb;
 +
 +      if (!intel_crtc->base.fb)
 +              return;
 +
 +      if (intel_alloc_plane_obj(intel_crtc, plane_config))
 +              return;
 +
 +      kfree(intel_crtc->base.fb);
 +      intel_crtc->base.fb = NULL;
 +
 +      /*
 +       * Failed to alloc the obj, check to see if we should share
 +       * an fb with another CRTC instead
 +       */
 +      list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
 +              i = to_intel_crtc(c);
 +
 +              if (c == &intel_crtc->base)
 +                      continue;
 +
 +              if (!i->active || !c->fb)
 +                      continue;
 +
 +              fb = to_intel_framebuffer(c->fb);
 +              if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
 +                      drm_framebuffer_reference(c->fb);
 +                      intel_crtc->base.fb = c->fb;
 +                      break;
 +              }
 +      }
 +}
 +
  static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                             int x, int y)
  {
@@@ -2426,23 -2299,31 +2426,23 @@@ intel_finish_fb(struct drm_framebuffer 
        return ret;
  }
  
 -static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
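 +/*
 + * Report whether a page flip is still outstanding on the crtc; a
 + * pending GPU reset counts as nothing outstanding, since the reset
 + * work completes any flips.
 + */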
 +static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
  {
        struct drm_device *dev = crtc->dev;
 -      struct drm_i915_master_private *master_priv;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      unsigned long flags;
 +      bool pending;
  
 -      if (!dev->primary->master)
 -              return;
 +      if (i915_reset_in_progress(&dev_priv->gpu_error) ||
 +          intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
 +              return false;
  
 -      master_priv = dev->primary->master->driver_priv;
 -      if (!master_priv->sarea_priv)
 -              return;
 +      spin_lock_irqsave(&dev->event_lock, flags);
 +      pending = to_intel_crtc(crtc)->unpin_work != NULL;
 +      spin_unlock_irqrestore(&dev->event_lock, flags);
  
 -      switch (intel_crtc->pipe) {
 -      case 0:
 -              master_priv->sarea_priv->pipeA_x = x;
 -              master_priv->sarea_priv->pipeA_y = y;
 -              break;
 -      case 1:
 -              master_priv->sarea_priv->pipeB_x = x;
 -              master_priv->sarea_priv->pipeB_y = y;
 -              break;
 -      default:
 -              break;
 -      }
 +      return pending;
  }
  
  static int
@@@ -2455,11 -2336,6 +2455,11 @@@ intel_pipe_set_base(struct drm_crtc *cr
        struct drm_framebuffer *old_fb;
        int ret;
  
 +      if (intel_crtc_has_pending_flip(crtc)) {
 +              DRM_ERROR("pipe is still busy with an old pageflip\n");
 +              return -EBUSY;
 +      }
 +
        /* no fb bound */
        if (!fb) {
                DRM_ERROR("No FB bound\n");
         * whether the platform allows pfit disable with pipe active, and only
         * then update the pipesrc and pfit state, even on the flip path.
         */
 -      if (i915_fastboot) {
 +      if (i915.fastboot) {
                const struct drm_display_mode *adjusted_mode =
                        &intel_crtc->config.adjusted_mode;
  
        intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
  
 -      intel_crtc_update_sarea_pos(crtc, x, y);
 -
        return 0;
  }
  
@@@ -3085,6 -2963,25 +3085,6 @@@ static void ironlake_fdi_disable(struc
        udelay(100);
  }
  
 -static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 -{
 -      struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      unsigned long flags;
 -      bool pending;
 -
 -      if (i915_reset_in_progress(&dev_priv->gpu_error) ||
 -          intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
 -              return false;
 -
 -      spin_lock_irqsave(&dev->event_lock, flags);
 -      pending = to_intel_crtc(crtc)->unpin_work != NULL;
 -      spin_unlock_irqrestore(&dev->event_lock, flags);
 -
 -      return pending;
 -}
 -
  bool intel_has_pending_fb_unpin(struct drm_device *dev)
  {
        struct intel_crtc *crtc;
@@@ -3690,7 -3587,8 +3690,7 @@@ static void ironlake_crtc_enable(struc
        intel_crtc_load_lut(crtc);
  
        intel_update_watermarks(crtc);
 -      intel_enable_pipe(dev_priv, pipe,
 -                        intel_crtc->config.has_pch_encoder, false);
 +      intel_enable_pipe(intel_crtc);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@@ -3835,7 -3733,8 +3835,7 @@@ static void haswell_crtc_enable(struct 
        intel_ddi_enable_transcoder_func(crtc);
  
        intel_update_watermarks(crtc);
 -      intel_enable_pipe(dev_priv, pipe,
 -                        intel_crtc->config.has_pch_encoder, false);
 +      intel_enable_pipe(intel_crtc);
  
        if (intel_crtc->config.has_pch_encoder)
                lpt_pch_enable(crtc);
         * to change the workaround. */
        haswell_mode_set_planes_workaround(intel_crtc);
        haswell_crtc_enable_planes(crtc);
 -
 -      /*
 -       * There seems to be a race in PCH platform hw (at least on some
 -       * outputs) where an enabled pipe still completes any pageflip right
 -       * away (as if the pipe is off) instead of waiting for vblank. As soon
 -       * as the first vblank happend, everything works as expected. Hence just
 -       * wait for one vblank before returning to avoid strange things
 -       * happening.
 -       */
 -      intel_wait_for_vblank(dev, intel_crtc->pipe);
  }
  
  static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@@ -4063,117 -3972,6 +4063,117 @@@ static void i9xx_pfit_enable(struct int
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
  }
  
 +#define for_each_power_domain(domain, mask)                           \
 +      for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
 +              if ((1 << (domain)) & (mask))
 +
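 +/*
 + * Return the display power domain that feeds the port behind
 + * @intel_encoder, so crtc code can hold a reference while the output
 + * is in use.
 + */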
 +enum intel_display_power_domain
 +intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 +{
 +      struct drm_device *dev = intel_encoder->base.dev;
 +      struct intel_digital_port *intel_dig_port;
 +
 +      switch (intel_encoder->type) {
 +      case INTEL_OUTPUT_UNKNOWN:
 +              /* Only DDI platforms should ever use this output type */
 +              WARN_ON_ONCE(!HAS_DDI(dev));
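 +              /* fallthrough */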
 +      case INTEL_OUTPUT_DISPLAYPORT:
 +      case INTEL_OUTPUT_HDMI:
 +      case INTEL_OUTPUT_EDP:
 +              intel_dig_port = enc_to_dig_port(&intel_encoder->base);
 +              switch (intel_dig_port->port) {
 +              case PORT_A:
 +                      return POWER_DOMAIN_PORT_DDI_A_4_LANES;
 +              case PORT_B:
 +                      return POWER_DOMAIN_PORT_DDI_B_4_LANES;
 +              case PORT_C:
 +                      return POWER_DOMAIN_PORT_DDI_C_4_LANES;
 +              case PORT_D:
 +                      return POWER_DOMAIN_PORT_DDI_D_4_LANES;
 +              default:
 +                      WARN_ON_ONCE(1);
 +                      return POWER_DOMAIN_PORT_OTHER;
 +              }
 +      case INTEL_OUTPUT_ANALOG:
 +              return POWER_DOMAIN_PORT_CRT;
 +      case INTEL_OUTPUT_DSI:
 +              return POWER_DOMAIN_PORT_DSI;
 +      default:
 +              return POWER_DOMAIN_PORT_OTHER;
 +      }
 +}
 +
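 +/*
 + * Gather every power domain the crtc needs: its pipe, its cpu
 + * transcoder, the panel fitter when enabled, and one domain per
 + * encoder hanging off the crtc.
 + */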
 +static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct intel_encoder *intel_encoder;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      enum pipe pipe = intel_crtc->pipe;
 +      bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
 +      unsigned long mask;
 +      enum transcoder transcoder;
 +
 +      transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 +
 +      mask = BIT(POWER_DOMAIN_PIPE(pipe));
 +      mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
 +      if (pfit_enabled)
 +              mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 +
 +      for_each_encoder_on_crtc(dev, crtc, intel_encoder)
 +              mask |= BIT(intel_display_port_power_domain(intel_encoder));
 +
 +      return mask;
 +}
 +
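 +/*
 + * Grab or release the POWER_DOMAIN_INIT reference; init_power_on
 + * tracks whether we currently hold it so the get/put calls stay
 + * balanced.
 + */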
 +void intel_display_set_init_power(struct drm_i915_private *dev_priv,
 +                                bool enable)
 +{
 +      if (dev_priv->power_domains.init_power_on == enable)
 +              return;
 +
 +      if (enable)
 +              intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 +      else
 +              intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 +
 +      dev_priv->power_domains.init_power_on = enable;
 +}
 +
 +static void modeset_update_crtc_power_domains(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
 +      struct intel_crtc *crtc;
 +
 +      /*
 +       * First get all needed power domains, then put all unneeded, to avoid
 +       * any unnecessary toggling of the power wells.
 +       */
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
 +              enum intel_display_power_domain domain;
 +
 +              if (!crtc->base.enabled)
 +                      continue;
 +
 +              pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
 +
 +              for_each_power_domain(domain, pipe_domains[crtc->pipe])
 +                      intel_display_power_get(dev_priv, domain);
 +      }
 +
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
 +              enum intel_display_power_domain domain;
 +
 +              for_each_power_domain(domain, crtc->enabled_power_domains)
 +                      intel_display_power_put(dev_priv, domain);
 +
 +              crtc->enabled_power_domains = pipe_domains[crtc->pipe];
 +      }
 +
 +      intel_display_set_init_power(dev_priv, false);
 +}
 +
  int valleyview_get_vco(struct drm_i915_private *dev_priv)
  {
        int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@@ -4290,8 -4088,9 +4290,8 @@@ static int valleyview_calc_cdclk(struc
        /* Looks like the 200MHz CDclk freq doesn't work on some configs */
  }
  
 -static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
 -                               unsigned modeset_pipes,
 -                               struct intel_crtc_config *pipe_config)
 +/* compute the max pixel clock for the new configuration */
 +static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = dev_priv->dev;
        struct intel_crtc *intel_crtc;
  
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
 -              if (modeset_pipes & (1 << intel_crtc->pipe))
 +              if (intel_crtc->new_enabled)
                        max_pixclk = max(max_pixclk,
 -                                       pipe_config->adjusted_mode.crtc_clock);
 -              else if (intel_crtc->base.enabled)
 -                      max_pixclk = max(max_pixclk,
 -                                       intel_crtc->config.adjusted_mode.crtc_clock);
 +                                       intel_crtc->new_config->adjusted_mode.crtc_clock);
        }
  
        return max_pixclk;
  }
  
  static void valleyview_modeset_global_pipes(struct drm_device *dev,
 -                                          unsigned *prepare_pipes,
 -                                          unsigned modeset_pipes,
 -                                          struct intel_crtc_config *pipe_config)
 +                                          unsigned *prepare_pipes)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
 -      int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
 -                                             pipe_config);
 +      int max_pixclk = intel_mode_max_pixclk(dev_priv);
        int cur_cdclk = valleyview_cur_cdclk(dev_priv);
  
        if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
                return;
  
 +      /* disable/enable all currently active pipes while we change cdclk */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head)
                if (intel_crtc->base.enabled)
  static void valleyview_modeset_global_resources(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
 +      int max_pixclk = intel_mode_max_pixclk(dev_priv);
        int cur_cdclk = valleyview_cur_cdclk(dev_priv);
        int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
  
        if (req_cdclk != cur_cdclk)
                valleyview_set_cdclk(dev, req_cdclk);
 +      modeset_update_crtc_power_domains(dev);
  }
  
  static void valleyview_crtc_enable(struct drm_crtc *crtc)
        intel_crtc_load_lut(crtc);
  
        intel_update_watermarks(crtc);
 -      intel_enable_pipe(dev_priv, pipe, false, is_dsi);
 +      intel_enable_pipe(intel_crtc);
 +      intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@@ -4411,8 -4213,7 +4411,8 @@@ static void i9xx_crtc_enable(struct drm
        intel_crtc_load_lut(crtc);
  
        intel_update_watermarks(crtc);
 -      intel_enable_pipe(dev_priv, pipe, false, false);
 +      intel_enable_pipe(intel_crtc);
 +      intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        /* The fixup needs to happen before cursor is enabled */
@@@ -4471,7 -4272,6 +4471,7 @@@ static void i9xx_crtc_disable(struct dr
        intel_disable_planes(crtc);
        intel_disable_primary_plane(dev_priv, plane, pipe);
  
 +      intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
        intel_disable_pipe(dev_priv, pipe);
  
        i9xx_pfit_disable(intel_crtc);
@@@ -4783,7 -4583,7 +4783,7 @@@ retry
  static void hsw_compute_ips_config(struct intel_crtc *crtc,
                                   struct intel_crtc_config *pipe_config)
  {
 -      pipe_config->ips_enabled = i915_enable_ips &&
 +      pipe_config->ips_enabled = i915.enable_ips &&
                                   hsw_crtc_supports_ips(crtc) &&
                                   pipe_config->pipe_bpp <= 24;
  }
@@@ -4984,8 -4784,8 +4984,8 @@@ intel_link_compute_m_n(int bits_per_pix
  
  static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  {
 -      if (i915_panel_use_ssc >= 0)
 -              return i915_panel_use_ssc != 0;
 +      if (i915.panel_use_ssc >= 0)
 +              return i915.panel_use_ssc != 0;
        return dev_priv->vbt.lvds_use_ssc
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
  }
@@@ -5044,7 -4844,7 +5044,7 @@@ static void i9xx_update_pll_dividers(st
  
        crtc->lowfreq_avail = false;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
 -          reduced_clock && i915_powersave) {
 +          reduced_clock && i915.powersave) {
                I915_WRITE(FP1(pipe), fp2);
                crtc->config.dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
@@@ -5459,23 -5259,25 +5459,23 @@@ static void intel_get_pipe_timings(stru
        pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
  }
  
 -static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
 -                                           struct intel_crtc_config *pipe_config)
 +void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 +                               struct intel_crtc_config *pipe_config)
  {
 -      struct drm_crtc *crtc = &intel_crtc->base;
 -
 -      crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
 -      crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
 -      crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
 -      crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
 +      mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
 +      mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
 +      mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
 +      mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
  
 -      crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
 -      crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
 -      crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
 -      crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
 +      mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
 +      mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
 +      mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
 +      mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
  
 -      crtc->mode.flags = pipe_config->adjusted_mode.flags;
 +      mode->flags = pipe_config->adjusted_mode.flags;
  
 -      crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
 -      crtc->mode.flags |= pipe_config->adjusted_mode.flags;
 +      mode->clock = pipe_config->adjusted_mode.crtc_clock;
 +      mode->flags |= pipe_config->adjusted_mode.flags;
  }
  
  static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@@ -5710,67 -5512,6 +5710,67 @@@ static void vlv_crtc_clock_get(struct i
        pipe_config->port_clock = clock.dot / 5;
  }
  
 +static void i9xx_get_plane_config(struct intel_crtc *crtc,
 +                                struct intel_plane_config *plane_config)
 +{
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      u32 val, base, offset;
 +      int pipe = crtc->pipe, plane = crtc->plane;
 +      int fourcc, pixel_format;
 +      int aligned_height;
 +
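 +      /*
 +       * Reconstruct the framebuffer the BIOS programmed from the primary
 +       * plane registers (DSPCNTR, DSPSURF/DSPADDR, PIPESRC, DSPSTRIDE).
 +       */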
 +      crtc->base.fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
 +      if (!crtc->base.fb) {
 +              DRM_DEBUG_KMS("failed to alloc fb\n");
 +              return;
 +      }
 +
 +      val = I915_READ(DSPCNTR(plane));
 +
 +      if (INTEL_INFO(dev)->gen >= 4)
 +              if (val & DISPPLANE_TILED)
 +                      plane_config->tiled = true;
 +
 +      pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
 +      fourcc = intel_format_to_fourcc(pixel_format);
 +      crtc->base.fb->pixel_format = fourcc;
 +      crtc->base.fb->bits_per_pixel =
 +              drm_format_plane_cpp(fourcc, 0) * 8;
 +
 +      if (INTEL_INFO(dev)->gen >= 4) {
 +              if (plane_config->tiled)
 +                      offset = I915_READ(DSPTILEOFF(plane));
 +              else
 +                      offset = I915_READ(DSPLINOFF(plane));
 +              base = I915_READ(DSPSURF(plane)) & 0xfffff000;
 +      } else {
 +              base = I915_READ(DSPADDR(plane));
 +      }
 +      plane_config->base = base;
 +
 +      val = I915_READ(PIPESRC(pipe));
 +      crtc->base.fb->width = ((val >> 16) & 0xfff) + 1;
 +      crtc->base.fb->height = ((val >> 0) & 0xfff) + 1;
 +
 +      val = I915_READ(DSPSTRIDE(pipe));
 +      crtc->base.fb->pitches[0] = val & 0xffffff80;
 +
 +      aligned_height = intel_align_height(dev, crtc->base.fb->height,
 +                                          plane_config->tiled);
 +
 +      plane_config->size = ALIGN(crtc->base.fb->pitches[0] *
 +                                 aligned_height, PAGE_SIZE);
 +
 +      DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 +                    pipe, plane, crtc->base.fb->width,
 +                    crtc->base.fb->height,
 +                    crtc->base.fb->bits_per_pixel, base,
 +                    crtc->base.fb->pitches[0],
 +                    plane_config->size);
 +}
 +
  static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_config *pipe_config)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
  
 +      if (!intel_display_power_enabled(dev_priv,
 +                                       POWER_DOMAIN_PIPE(crtc->pipe)))
 +              return false;
 +
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
  
@@@ -6443,7 -6180,7 +6443,7 @@@ int ironlake_get_lanes_required(int tar
         * is 2.5%; use 5% for safety's sake.
         */
        u32 bps = target_clock * bpp * 21 / 20;
 -      return bps / (link_bw * 8) + 1;
 +      return DIV_ROUND_UP(bps, link_bw * 8);
  }
  
  static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
@@@ -6611,7 -6348,7 +6611,7 @@@ static int ironlake_crtc_mode_set(struc
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
  
 -      if (is_lvds && has_reduced_clock && i915_powersave)
 +      if (is_lvds && has_reduced_clock && i915.powersave)
                intel_crtc->lowfreq_avail = true;
        else
                intel_crtc->lowfreq_avail = false;
@@@ -6718,85 -6455,25 +6718,85 @@@ static void ironlake_get_pfit_config(st
        }
  }
  
 -static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 -                                   struct intel_crtc_config *pipe_config)
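 +/*
 + * ILK+ flavour of the plane readout: like i9xx_get_plane_config(), but
 + * the surface offset comes from DSPOFFSET on HSW/BDW and from
 + * DSPTILEOFF/DSPLINOFF elsewhere.
 + */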
 +static void ironlake_get_plane_config(struct intel_crtc *crtc,
 +                                    struct intel_plane_config *plane_config)
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t tmp;
 +      u32 val, base, offset;
 +      int pipe = crtc->pipe, plane = crtc->plane;
 +      int fourcc, pixel_format;
 +      int aligned_height;
 +
 +      crtc->base.fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
 +      if (!crtc->base.fb) {
 +              DRM_DEBUG_KMS("failed to alloc fb\n");
 +              return;
 +      }
  
 -      pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 -      pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 +      val = I915_READ(DSPCNTR(plane));
  
 -      tmp = I915_READ(PIPECONF(crtc->pipe));
 -      if (!(tmp & PIPECONF_ENABLE))
 -              return false;
 +      if (INTEL_INFO(dev)->gen >= 4)
 +              if (val & DISPPLANE_TILED)
 +                      plane_config->tiled = true;
  
 -      switch (tmp & PIPECONF_BPC_MASK) {
 -      case PIPECONF_6BPC:
 -              pipe_config->pipe_bpp = 18;
 -              break;
 -      case PIPECONF_8BPC:
 +      pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
 +      fourcc = intel_format_to_fourcc(pixel_format);
 +      crtc->base.fb->pixel_format = fourcc;
 +      crtc->base.fb->bits_per_pixel =
 +              drm_format_plane_cpp(fourcc, 0) * 8;
 +
 +      base = I915_READ(DSPSURF(plane)) & 0xfffff000;
 +      if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 +              offset = I915_READ(DSPOFFSET(plane));
 +      } else {
 +              if (plane_config->tiled)
 +                      offset = I915_READ(DSPTILEOFF(plane));
 +              else
 +                      offset = I915_READ(DSPLINOFF(plane));
 +      }
 +      plane_config->base = base;
 +
 +      val = I915_READ(PIPESRC(pipe));
 +      crtc->base.fb->width = ((val >> 16) & 0xfff) + 1;
 +      crtc->base.fb->height = ((val >> 0) & 0xfff) + 1;
 +
 +      val = I915_READ(DSPSTRIDE(pipe));
 +      crtc->base.fb->pitches[0] = val & 0xffffff80;
 +
 +      aligned_height = intel_align_height(dev, crtc->base.fb->height,
 +                                          plane_config->tiled);
 +
 +      plane_config->size = ALIGN(crtc->base.fb->pitches[0] *
 +                                 aligned_height, PAGE_SIZE);
 +
 +      DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 +                    pipe, plane, crtc->base.fb->width,
 +                    crtc->base.fb->height,
 +                    crtc->base.fb->bits_per_pixel, base,
 +                    crtc->base.fb->pitches[0],
 +                    plane_config->size);
 +}
 +
 +static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 +                                   struct intel_crtc_config *pipe_config)
 +{
 +      struct drm_device *dev = crtc->base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      uint32_t tmp;
 +
 +      pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 +      pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 +
 +      tmp = I915_READ(PIPECONF(crtc->pipe));
 +      if (!(tmp & PIPECONF_ENABLE))
 +              return false;
 +
 +      switch (tmp & PIPECONF_BPC_MASK) {
 +      case PIPECONF_6BPC:
 +              pipe_config->pipe_bpp = 18;
 +              break;
 +      case PIPECONF_8BPC:
                pipe_config->pipe_bpp = 24;
                break;
        case PIPECONF_10BPC:
@@@ -7039,7 -6716,7 +7039,7 @@@ static void __hsw_enable_package_c8(str
                return;
  
        schedule_delayed_work(&dev_priv->pc8.enable_work,
 -                            msecs_to_jiffies(i915_pc8_timeout));
 +                            msecs_to_jiffies(i915.pc8_timeout));
  }
  
  static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
@@@ -7138,7 -6815,7 +7138,7 @@@ static void hsw_update_package_c8(struc
        if (!HAS_PC8(dev_priv->dev))
                return;
  
 -      if (!i915_enable_pc8)
 +      if (!i915.enable_pc8)
                return;
  
        mutex_lock(&dev_priv->pc8.lock);
@@@ -7159,9 -6836,105 +7159,9 @@@ done
        mutex_unlock(&dev_priv->pc8.lock);
  }
  
 -static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
 -{
 -      if (!HAS_PC8(dev_priv->dev))
 -              return;
 -
 -      mutex_lock(&dev_priv->pc8.lock);
 -      if (!dev_priv->pc8.gpu_idle) {
 -              dev_priv->pc8.gpu_idle = true;
 -              __hsw_enable_package_c8(dev_priv);
 -      }
 -      mutex_unlock(&dev_priv->pc8.lock);
 -}
 -
 -static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
 -{
 -      if (!HAS_PC8(dev_priv->dev))
 -              return;
 -
 -      mutex_lock(&dev_priv->pc8.lock);
 -      if (dev_priv->pc8.gpu_idle) {
 -              dev_priv->pc8.gpu_idle = false;
 -              __hsw_disable_package_c8(dev_priv);
 -      }
 -      mutex_unlock(&dev_priv->pc8.lock);
 -}
 -
 -#define for_each_power_domain(domain, mask)                           \
 -      for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
 -              if ((1 << (domain)) & (mask))
 -
 -static unsigned long get_pipe_power_domains(struct drm_device *dev,
 -                                          enum pipe pipe, bool pfit_enabled)
 -{
 -      unsigned long mask;
 -      enum transcoder transcoder;
 -
 -      transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 -
 -      mask = BIT(POWER_DOMAIN_PIPE(pipe));
 -      mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
 -      if (pfit_enabled)
 -              mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
 -
 -      return mask;
 -}
 -
 -void intel_display_set_init_power(struct drm_device *dev, bool enable)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (dev_priv->power_domains.init_power_on == enable)
 -              return;
 -
 -      if (enable)
 -              intel_display_power_get(dev, POWER_DOMAIN_INIT);
 -      else
 -              intel_display_power_put(dev, POWER_DOMAIN_INIT);
 -
 -      dev_priv->power_domains.init_power_on = enable;
 -}
 -
 -static void modeset_update_power_wells(struct drm_device *dev)
 -{
 -      unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
 -      struct intel_crtc *crtc;
 -
 -      /*
 -       * First get all needed power domains, then put all unneeded, to avoid
 -       * any unnecessary toggling of the power wells.
 -       */
 -      list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
 -              enum intel_display_power_domain domain;
 -
 -              if (!crtc->base.enabled)
 -                      continue;
 -
 -              pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
 -                                              crtc->pipe,
 -                                              crtc->config.pch_pfit.enabled);
 -
 -              for_each_power_domain(domain, pipe_domains[crtc->pipe])
 -                      intel_display_power_get(dev, domain);
 -      }
 -
 -      list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
 -              enum intel_display_power_domain domain;
 -
 -              for_each_power_domain(domain, crtc->enabled_power_domains)
 -                      intel_display_power_put(dev, domain);
 -
 -              crtc->enabled_power_domains = pipe_domains[crtc->pipe];
 -      }
 -
 -      intel_display_set_init_power(dev, false);
 -}
 -
  static void haswell_modeset_global_resources(struct drm_device *dev)
  {
 -      modeset_update_power_wells(dev);
 +      modeset_update_crtc_power_domains(dev);
        hsw_update_package_c8(dev);
  }
  
@@@ -7212,10 -6985,6 +7212,10 @@@ static bool haswell_get_pipe_config(str
        enum intel_display_power_domain pfit_domain;
        uint32_t tmp;
  
 +      if (!intel_display_power_enabled(dev_priv,
 +                                       POWER_DOMAIN_PIPE(crtc->pipe)))
 +              return false;
 +
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
  
                        pipe_config->cpu_transcoder = TRANSCODER_EDP;
        }
  
 -      if (!intel_display_power_enabled(dev,
 +      if (!intel_display_power_enabled(dev_priv,
                        POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
                return false;
  
        intel_get_pipe_timings(crtc, pipe_config);
  
        pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
 -      if (intel_display_power_enabled(dev, pfit_domain))
 +      if (intel_display_power_enabled(dev_priv, pfit_domain))
                ironlake_get_pfit_config(crtc, pipe_config);
  
        if (IS_HASWELL(dev))
@@@ -7804,18 -7573,18 +7804,18 @@@ static int intel_crtc_cursor_set(struc
                return -ENOENT;
  
        if (obj->base.size < width * height * 4) {
 -              DRM_ERROR("buffer is to small\n");
 +              DRM_DEBUG_KMS("buffer is too small\n");
                ret = -ENOMEM;
                goto fail;
        }
  
        /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
 -      if (!dev_priv->info->cursor_needs_physical) {
 +      if (!INTEL_INFO(dev)->cursor_needs_physical) {
                unsigned alignment;
  
                if (obj->tiling_mode) {
 -                      DRM_ERROR("cursor cannot be tiled\n");
 +                      DRM_DEBUG_KMS("cursor cannot be tiled\n");
                        ret = -EINVAL;
                        goto fail_locked;
                }
  
                ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
                if (ret) {
 -                      DRM_ERROR("failed to move cursor bo into the GTT\n");
 +                      DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
                        goto fail_locked;
                }
  
                ret = i915_gem_object_put_fence(obj);
                if (ret) {
 -                      DRM_ERROR("failed to release fence for cursor");
 +                      DRM_DEBUG_KMS("failed to release fence for cursor\n");
                        goto fail_unpin;
                }
  
                                                  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
                                                  align);
                if (ret) {
 -                      DRM_ERROR("failed to attach phys object\n");
 +                      DRM_DEBUG_KMS("failed to attach phys object\n");
                        goto fail_locked;
                }
                addr = obj->phys_obj->handle->busaddr;
  
   finish:
        if (intel_crtc->cursor_bo) {
 -              if (dev_priv->info->cursor_needs_physical) {
 +              if (INTEL_INFO(dev)->cursor_needs_physical) {
                        if (intel_crtc->cursor_bo != obj)
                                i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
                } else
@@@ -7921,10 -7690,10 +7921,10 @@@ static struct drm_display_mode load_det
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
  };
  
 -static struct drm_framebuffer *
 -intel_framebuffer_create(struct drm_device *dev,
 -                       struct drm_mode_fb_cmd2 *mode_cmd,
 -                       struct drm_i915_gem_object *obj)
 +struct drm_framebuffer *
 +__intel_framebuffer_create(struct drm_device *dev,
 +                         struct drm_mode_fb_cmd2 *mode_cmd,
 +                         struct drm_i915_gem_object *obj)
  {
        struct intel_framebuffer *intel_fb;
        int ret;
                return ERR_PTR(-ENOMEM);
        }
  
 -      ret = i915_mutex_lock_interruptible(dev);
 -      if (ret)
 -              goto err;
 -
        ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
 -      mutex_unlock(&dev->struct_mutex);
        if (ret)
                goto err;
  
        return ERR_PTR(ret);
  }
  
 +static struct drm_framebuffer *
 +intel_framebuffer_create(struct drm_device *dev,
 +                       struct drm_mode_fb_cmd2 *mode_cmd,
 +                       struct drm_i915_gem_object *obj)
 +{
 +      struct drm_framebuffer *fb;
 +      int ret;
 +
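 +      /* Locking wrapper: take struct_mutex around __intel_framebuffer_create();
 +       * callers that already hold the mutex use the __ variant directly. */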
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              return ERR_PTR(ret);
 +      fb = __intel_framebuffer_create(dev, mode_cmd, obj);
 +      mutex_unlock(&dev->struct_mutex);
 +
 +      return fb;
 +}
 +
  static u32
  intel_framebuffer_pitch_for_width(int width, int bpp)
  {
@@@ -8009,16 -7766,14 +8009,16 @@@ mode_fits_in_fbdev(struct drm_device *d
        struct drm_i915_gem_object *obj;
        struct drm_framebuffer *fb;
  
 -      if (dev_priv->fbdev == NULL)
 +      if (!dev_priv->fbdev)
                return NULL;
  
 -      obj = dev_priv->fbdev->ifb.obj;
 -      if (obj == NULL)
 +      if (!dev_priv->fbdev->fb)
                return NULL;
  
 -      fb = &dev_priv->fbdev->ifb.base;
 +      obj = dev_priv->fbdev->fb->obj;
 +      BUG_ON(!obj);
 +
 +      fb = &dev_priv->fbdev->fb->base;
        if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
                                                               fb->bits_per_pixel))
                return NULL;
@@@ -8100,8 -7855,6 +8100,8 @@@ bool intel_get_load_detect_pipe(struct 
        to_intel_connector(connector)->new_encoder = intel_encoder;
  
        intel_crtc = to_intel_crtc(crtc);
 +      intel_crtc->new_enabled = true;
 +      intel_crtc->new_config = &intel_crtc->config;
        old->dpms_mode = connector->dpms;
        old->load_detect_temp = true;
        old->release_fb = NULL;
                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
 -              mutex_unlock(&crtc->mutex);
 -              return false;
 +              goto fail;
        }
  
        if (intel_set_mode(crtc, mode, 0, 0, fb)) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);
 -              mutex_unlock(&crtc->mutex);
 -              return false;
 +              goto fail;
        }
  
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
        return true;
 +
 + fail:
 +      intel_crtc->new_enabled = crtc->enabled;
 +      if (intel_crtc->new_enabled)
 +              intel_crtc->new_config = &intel_crtc->config;
 +      else
 +              intel_crtc->new_config = NULL;
 +      mutex_unlock(&crtc->mutex);
 +      return false;
  }
  
  void intel_release_load_detect_pipe(struct drm_connector *connector,
                intel_attached_encoder(connector);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector),
        if (old->load_detect_temp) {
                to_intel_connector(connector)->new_encoder = NULL;
                intel_encoder->new_crtc = NULL;
 +              intel_crtc->new_enabled = false;
 +              intel_crtc->new_config = NULL;
                intel_set_mode(crtc, NULL, 0, 0, NULL);
  
                if (old->release_fb) {
@@@ -8447,12 -8190,8 +8447,12 @@@ void intel_mark_busy(struct drm_device 
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      hsw_package_c8_gpu_busy(dev_priv);
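 +      /* mm.busy pairs the PC8 disable here with the enable in
 +       * intel_mark_idle(), exactly once per busy period. */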
 +      if (dev_priv->mm.busy)
 +              return;
 +
 +      hsw_disable_package_c8(dev_priv);
        i915_update_gfx_val(dev_priv);
 +      dev_priv->mm.busy = true;
  }
  
  void intel_mark_idle(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
  
 -      hsw_package_c8_gpu_idle(dev_priv);
 -
 -      if (!i915_powersave)
 +      if (!dev_priv->mm.busy)
                return;
  
 +      dev_priv->mm.busy = false;
 +
 +      if (!i915.powersave)
 +              goto out;
 +
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (!crtc->fb)
                        continue;
                intel_decrease_pllclock(crtc);
        }
  
 -      if (dev_priv->info->gen >= 6)
 +      if (INTEL_INFO(dev)->gen >= 6)
                gen6_rps_idle(dev->dev_private);
 +
 +out:
 +      hsw_enable_package_c8(dev_priv);
  }
  
  void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_crtc *crtc;
  
 -      if (!i915_powersave)
 +      if (!i915.powersave)
                return;
  
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@@ -8853,6 -8586,20 +8853,20 @@@ static int intel_gen7_queue_flip(struc
        if (ring->id == RCS)
                len += 6;
  
+       /*
+        * BSpec MI_DISPLAY_FLIP for IVB:
+        * "The full packet must be contained within the same cache line."
+        *
+        * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+        * cacheline, if we ever start emitting more commands before
+        * the MI_DISPLAY_FLIP we may need to first emit everything else,
+        * then do the cacheline alignment, and finally emit the
+        * MI_DISPLAY_FLIP.
+        */
+       ret = intel_ring_cacheline_align(ring);
+       if (ret)
+               goto err_unpin;
+
        ret = intel_ring_begin(ring, len);
        if (ret)
                goto err_unpin;
@@@ -8929,9 -8676,6 +8943,9 @@@ static int intel_crtc_page_flip(struct 
             fb->pitches[0] != crtc->fb->pitches[0]))
                return -EINVAL;
  
 +      if (i915_terminally_wedged(&dev_priv->gpu_error))
 +              goto out_hang;
 +
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;
@@@ -9006,13 -8750,6 +9020,13 @@@ cleanup
  free_work:
        kfree(work);
  
 +      if (ret == -EIO) {
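 +              /* Terminally wedged: complete the flip as a plain fb update
 +               * so userspace still gets its vblank event. */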
 +out_hang:
 +              intel_crtc_wait_for_pending_flips(crtc);
 +              ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
 +              if (ret == 0 && event)
 +                      drm_send_vblank_event(dev, intel_crtc->pipe, event);
 +      }
        return ret;
  }
  
@@@ -9029,7 -8766,6 +9043,7 @@@ static struct drm_crtc_helper_funcs int
   */
  static void intel_modeset_update_staged_output_state(struct drm_device *dev)
  {
 +      struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
  
                encoder->new_crtc =
                        to_intel_crtc(encoder->base.crtc);
        }
 +
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 +                          base.head) {
 +              crtc->new_enabled = crtc->base.enabled;
 +
 +              if (crtc->new_enabled)
 +                      crtc->new_config = &crtc->config;
 +              else
 +                      crtc->new_config = NULL;
 +      }
  }
  
  /**
   */
  static void intel_modeset_commit_output_state(struct drm_device *dev)
  {
 +      struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
  
                            base.head) {
                encoder->base.crtc = &encoder->new_crtc->base;
        }
 +
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 +                          base.head) {
 +              crtc->base.enabled = crtc->new_enabled;
 +      }
  }
  
  static void
@@@ -9221,47 -8941,23 +9235,47 @@@ static void intel_dump_pipe_config(stru
        DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
  }
  
 -static bool check_encoder_cloning(struct drm_crtc *crtc)
 +static bool encoders_cloneable(const struct intel_encoder *a,
 +                             const struct intel_encoder *b)
 +{
 +      /* masks could be asymmetric, so check both ways */
 +      return a == b || (a->cloneable & (1 << b->type) &&
 +                        b->cloneable & (1 << a->type));
 +}
 +
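 +/*
 + * Check that @encoder can be cloned with every other encoder staged
 + * (via new_crtc) on the same crtc.
 + */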
 +static bool check_single_encoder_cloning(struct intel_crtc *crtc,
 +                                       struct intel_encoder *encoder)
 +{
 +      struct drm_device *dev = crtc->base.dev;
 +      struct intel_encoder *source_encoder;
 +
 +      list_for_each_entry(source_encoder,
 +                          &dev->mode_config.encoder_list, base.head) {
 +              if (source_encoder->new_crtc != crtc)
 +                      continue;
 +
 +              if (!encoders_cloneable(encoder, source_encoder))
 +                      return false;
 +      }
 +
 +      return true;
 +}
 +
 +static bool check_encoder_cloning(struct intel_crtc *crtc)
  {
 -      int num_encoders = 0;
 -      bool uncloneable_encoders = false;
 +      struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;
  
 -      list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
 -                          base.head) {
 -              if (&encoder->new_crtc->base != crtc)
 +      list_for_each_entry(encoder,
 +                          &dev->mode_config.encoder_list, base.head) {
 +              if (encoder->new_crtc != crtc)
                        continue;
  
 -              num_encoders++;
 -              if (!encoder->cloneable)
 -                      uncloneable_encoders = true;
 +              if (!check_single_encoder_cloning(crtc, encoder))
 +                      return false;
        }
  
 -      return !(num_encoders > 1 && uncloneable_encoders);
 +      return true;
  }
  
  static struct intel_crtc_config *
@@@ -9275,7 -8971,7 +9289,7 @@@ intel_modeset_pipe_config(struct drm_cr
        int plane_bpp, ret = -EINVAL;
        bool retry = true;
  
 -      if (!check_encoder_cloning(crtc)) {
 +      if (!check_encoder_cloning(to_intel_crtc(crtc))) {
                DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
                return ERR_PTR(-EINVAL);
        }
@@@ -9431,22 -9127,29 +9445,22 @@@ intel_modeset_affected_pipes(struct drm
                        *prepare_pipes |= 1 << encoder->new_crtc->pipe;
        }
  
 -      /* Check for any pipes that will be fully disabled ... */
 +      /* Check for pipes that will be enabled/disabled ... */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
 -              bool used = false;
 -
 -              /* Don't try to disable disabled crtcs. */
 -              if (!intel_crtc->base.enabled)
 +              if (intel_crtc->base.enabled == intel_crtc->new_enabled)
                        continue;
  
 -              list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 -                                  base.head) {
 -                      if (encoder->new_crtc == intel_crtc)
 -                              used = true;
 -              }
 -
 -              if (!used)
 +              if (!intel_crtc->new_enabled)
                        *disable_pipes |= 1 << intel_crtc->pipe;
 +              else
 +                      *prepare_pipes |= 1 << intel_crtc->pipe;
        }
  
  
        /* set_mode is also used to update properties on live display pipes. */
        intel_crtc = to_intel_crtc(crtc);
 -      if (crtc->enabled)
 +      if (intel_crtc->new_enabled)
                *prepare_pipes |= 1 << intel_crtc->pipe;
  
        /*
@@@ -9505,13 -9208,10 +9519,13 @@@ intel_modeset_update_state(struct drm_d
  
        intel_modeset_commit_output_state(dev);
  
 -      /* Update computed state. */
 +      /* Double check state. */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
 -              intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
 +              WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
 +              WARN_ON(intel_crtc->new_config &&
 +                      intel_crtc->new_config != &intel_crtc->config);
 +              WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
        }
  
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@@ -9680,8 -9380,10 +9694,8 @@@ intel_pipe_config_compare(struct drm_de
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);
  
 -      if (!HAS_DDI(dev)) {
 -              PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 -              PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 -      }
 +      PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 +      PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
  
  #undef PIPE_CONF_CHECK_X
  #undef PIPE_CONF_CHECK_I
@@@ -9941,7 -9643,6 +9955,7 @@@ static int __intel_set_mode(struct drm_
                }
                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       "[modeset]");
 +              to_intel_crtc(crtc)->new_config = pipe_config;
        }
  
        /*
         * adjusted_mode bits in the crtc directly.
         */
        if (IS_VALLEYVIEW(dev)) {
 -              valleyview_modeset_global_pipes(dev, &prepare_pipes,
 -                                              modeset_pipes, pipe_config);
 +              valleyview_modeset_global_pipes(dev, &prepare_pipes);
  
                /* may have added more to prepare_pipes than we should */
                prepare_pipes &= ~disable_pipes;
                /* mode_set/enable/disable functions rely on a correct pipe
                 * config. */
                to_intel_crtc(crtc)->config = *pipe_config;
 +              to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
  
                /*
                 * Calculate and store various constants which
@@@ -10045,24 -9746,16 +10059,24 @@@ static void intel_set_config_free(struc
  
        kfree(config->save_connector_encoders);
        kfree(config->save_encoder_crtcs);
 +      kfree(config->save_crtc_enabled);
        kfree(config);
  }
  
  static int intel_set_config_save_state(struct drm_device *dev,
                                       struct intel_set_config *config)
  {
 +      struct drm_crtc *crtc;
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        int count;
  
 +      config->save_crtc_enabled =
 +              kcalloc(dev->mode_config.num_crtc,
 +                      sizeof(bool), GFP_KERNEL);
 +      if (!config->save_crtc_enabled)
 +              return -ENOMEM;
 +
        config->save_encoder_crtcs =
                kcalloc(dev->mode_config.num_encoder,
                        sizeof(struct drm_crtc *), GFP_KERNEL);
         * Should anything bad happen only the expected state is
         * restored, not the drivers personal bookkeeping.
         */
 +      count = 0;
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +              config->save_crtc_enabled[count++] = crtc->enabled;
 +      }
 +
        count = 0;
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                config->save_encoder_crtcs[count++] = encoder->crtc;
  static void intel_set_config_restore_state(struct drm_device *dev,
                                           struct intel_set_config *config)
  {
 +      struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        int count;
  
 +      count = 0;
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
 +              crtc->new_enabled = config->save_crtc_enabled[count++];
 +
 +              if (crtc->new_enabled)
 +                      crtc->new_config = &crtc->config;
 +              else
 +                      crtc->new_config = NULL;
 +      }
 +
        count = 0;
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                encoder->new_crtc =
@@@ -10163,7 -9840,7 +10177,7 @@@ intel_set_config_compute_mode_changes(s
                        struct intel_crtc *intel_crtc =
                                to_intel_crtc(set->crtc);
  
 -                      if (intel_crtc->active && i915_fastboot) {
 +                      if (intel_crtc->active && i915.fastboot) {
                                DRM_DEBUG_KMS("crtc has no fb, will flip\n");
                                config->fb_changed = true;
                        } else {
@@@ -10199,9 -9876,9 +10213,9 @@@ intel_modeset_stage_output_state(struc
                                 struct drm_mode_set *set,
                                 struct intel_set_config *config)
  {
 -      struct drm_crtc *new_crtc;
        struct intel_connector *connector;
        struct intel_encoder *encoder;
 +      struct intel_crtc *crtc;
        int ro;
  
        /* The upper layers ensure that we either disable a crtc or have a list
        /* Update crtc of enabled connectors. */
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
 +              struct drm_crtc *new_crtc;
 +
                if (!connector->new_encoder)
                        continue;
  
        }
        /* Now we've also updated encoder->new_crtc for all encoders. */
  
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 +                          base.head) {
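 +              /* A crtc stays enabled iff some staged encoder points at it. */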
 +              crtc->new_enabled = false;
 +
 +              list_for_each_entry(encoder,
 +                                  &dev->mode_config.encoder_list,
 +                                  base.head) {
 +                      if (encoder->new_crtc == crtc) {
 +                              crtc->new_enabled = true;
 +                              break;
 +                      }
 +              }
 +
 +              if (crtc->new_enabled != crtc->base.enabled) {
 +                      DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
 +                                    crtc->new_enabled ? "en" : "dis");
 +                      config->mode_changed = true;
 +              }
 +
 +              if (crtc->new_enabled)
 +                      crtc->new_config = &crtc->config;
 +              else
 +                      crtc->new_config = NULL;
 +      }
 +
        return 0;
  }
  
 +static void disable_crtc_nofb(struct intel_crtc *crtc)
 +{
 +      struct drm_device *dev = crtc->base.dev;
 +      struct intel_encoder *encoder;
 +      struct intel_connector *connector;
 +
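 +      /* Unlink all staged connectors/encoders from this crtc and mark it
 +       * disabled; used by the set_config error path when a pipe was left
 +       * on without a framebuffer. */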
 +      DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
 +                    pipe_name(crtc->pipe));
 +
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
 +              if (connector->new_encoder &&
 +                  connector->new_encoder->new_crtc == crtc)
 +                      connector->new_encoder = NULL;
 +      }
 +
 +      list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 +              if (encoder->new_crtc == crtc)
 +                      encoder->new_crtc = NULL;
 +      }
 +
 +      crtc->new_enabled = false;
 +      crtc->new_config = NULL;
 +}
 +
  static int intel_crtc_set_config(struct drm_mode_set *set)
  {
        struct drm_device *dev;
                 * flipping, so increasing its cost here shouldn't be a big
                 * deal).
                 */
 -              if (i915_fastboot && ret == 0)
 +              if (i915.fastboot && ret == 0)
                        intel_modeset_check_state(set->crtc->dev);
        }
  
  fail:
                intel_set_config_restore_state(dev, config);
  
 +              /*
 +               * HACK: if the pipe was on, but we didn't have a framebuffer,
 +               * force the pipe off to avoid oopsing in the modeset code
 +               * due to fb==NULL. This should only happen during boot since
 +               * we don't yet reconstruct the FB from the hardware state.
 +               */
 +              if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
 +                      disable_crtc_nofb(to_intel_crtc(save_set.crtc));
 +
                /* Try to restore the config */
                if (config->mode_changed &&
                    intel_set_mode(save_set.crtc, save_set.mode,
@@@ -10638,7 -10255,12 +10652,7 @@@ static int intel_encoder_clones(struct 
  
        list_for_each_entry(source_encoder,
                            &dev->mode_config.encoder_list, base.head) {
 -
 -              if (encoder == source_encoder)
 -                      index_mask |= (1 << entry);
 -
 -              /* Intel hw has only one MUX where enocoders could be cloned. */
 -              if (encoder->cloneable && source_encoder->cloneable)
 +              if (encoders_cloneable(encoder, source_encoder))
                        index_mask |= (1 << entry);
  
                entry++;
@@@ -10657,7 -10279,8 +10671,7 @@@ static bool has_edp_a(struct drm_devic
        if ((I915_READ(DP_A) & DP_DETECTED) == 0)
                return false;
  
 -      if (IS_GEN5(dev) &&
 -          (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
 +      if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
                return false;
  
        return true;
@@@ -10810,13 -10433,18 +10824,13 @@@ static void intel_setup_outputs(struct 
        drm_helper_move_panel_connectors_to_head(dev);
  }
  
 -void intel_framebuffer_fini(struct intel_framebuffer *fb)
 -{
 -      drm_framebuffer_cleanup(&fb->base);
 -      WARN_ON(!fb->obj->framebuffer_references--);
 -      drm_gem_object_unreference_unlocked(&fb->obj->base);
 -}
 -
  static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
  {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  
 -      intel_framebuffer_fini(intel_fb);
 +      drm_framebuffer_cleanup(fb);
 +      WARN_ON(!intel_fb->obj->framebuffer_references--);
 +      drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
        kfree(intel_fb);
  }
  
@@@ -10835,12 -10463,12 +10849,12 @@@ static const struct drm_framebuffer_fun
        .create_handle = intel_user_framebuffer_create_handle,
  };
  
 -int intel_framebuffer_init(struct drm_device *dev,
 -                         struct intel_framebuffer *intel_fb,
 -                         struct drm_mode_fb_cmd2 *mode_cmd,
 -                         struct drm_i915_gem_object *obj)
 +static int intel_framebuffer_init(struct drm_device *dev,
 +                                struct intel_framebuffer *intel_fb,
 +                                struct drm_mode_fb_cmd2 *mode_cmd,
 +                                struct drm_i915_gem_object *obj)
  {
 -      int aligned_height, tile_height;
 +      int aligned_height;
        int pitch_limit;
        int ret;
  
        if (mode_cmd->offsets[0] != 0)
                return -EINVAL;
  
 -      tile_height = IS_GEN2(dev) ? 16 : 8;
 -      aligned_height = ALIGN(mode_cmd->height,
 -                             obj->tiling_mode ? tile_height : 1);
 +      aligned_height = intel_align_height(dev, mode_cmd->height,
 +                                          obj->tiling_mode);
        /* FIXME drm helper for size checks (especially planar formats)? */
        if (obj->base.size < aligned_height * mode_cmd->pitches[0])
                return -EINVAL;
@@@ -10995,7 -10624,6 +11009,7 @@@ static void intel_init_display(struct d
  
        if (HAS_DDI(dev)) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
 +              dev_priv->display.get_plane_config = ironlake_get_plane_config;
                dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
 +              dev_priv->display.get_plane_config = ironlake_get_plane_config;
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 +              dev_priv->display.get_plane_config = i9xx_get_plane_config;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
                dev_priv->display.update_plane = i9xx_update_plane;
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 +              dev_priv->display.get_plane_config = i9xx_get_plane_config;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@@ -11214,9 -10839,6 +11228,9 @@@ static struct intel_quirk intel_quirks[
  
        /* Acer Aspire 4736Z */
        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 +
 +      /* Acer Aspire 5336 */
 +      { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
  };
  
  static void intel_init_quirks(struct drm_device *dev)
@@@ -11247,7 -10869,6 +11261,7 @@@ static void i915_disable_vga(struct drm
        u8 sr1;
        u32 vga_reg = i915_vgacntrl_reg(dev);
  
 +      /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
@@@ -11280,9 -10901,7 +11294,9 @@@ void intel_modeset_suspend_hw(struct dr
  void intel_modeset_init(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      int i, j, ret;
 +      int sprite, ret;
 +      enum pipe pipe;
 +      struct intel_crtc *crtc;
  
        drm_mode_config_init(dev);
  
                      INTEL_INFO(dev)->num_pipes,
                      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
  
 -      for_each_pipe(i) {
 -              intel_crtc_init(dev, i);
 -              for (j = 0; j < dev_priv->num_plane; j++) {
 -                      ret = intel_plane_init(dev, i, j);
 +      for_each_pipe(pipe) {
 +              intel_crtc_init(dev, pipe);
 +              for_each_sprite(pipe, sprite) {
 +                      ret = intel_plane_init(dev, pipe, sprite);
                        if (ret)
                                DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
 -                                            pipe_name(i), sprite_name(i, j), ret);
 +                                            pipe_name(pipe), sprite_name(pipe, sprite), ret);
                }
        }
  
  
        /* Just in case the BIOS is doing something questionable. */
        intel_disable_fbc(dev);
 +
 +      mutex_lock(&dev->mode_config.mutex);
 +      intel_modeset_setup_hw_state(dev, false);
 +      mutex_unlock(&dev->mode_config.mutex);
 +
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 +                          base.head) {
 +              if (!crtc->active)
 +                      continue;
 +
 +              /*
 +               * Note that reserving the BIOS fb up front prevents us
 +               * from stuffing other stolen allocations like the ring
 +               * on top.  This prevents some ugliness at boot time, and
 +               * can even allow for smooth boot transitions if the BIOS
 +               * fb is large enough for the active pipe configuration.
 +               */
 +              if (dev_priv->display.get_plane_config) {
 +                      dev_priv->display.get_plane_config(crtc,
 +                                                         &crtc->plane_config);
 +                      /*
 +                       * If the fb is shared between multiple heads, we'll
 +                       * just get the first one.
 +                       */
 +                      intel_find_plane_obj(crtc, &crtc->plane_config);
 +              }
 +      }
  }
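
The takeover loop above is the consumer of the new get_plane_config() hooks: each hook reads back what the BIOS programmed so the stolen-memory framebuffer can be reserved before anything else lands on top of it. A minimal sketch of such a hook, with register and field names assumed for illustration rather than copied from this series:

	static void sketch_get_plane_config(struct intel_crtc *crtc,
					    struct intel_plane_config *plane_config)
	{
		struct drm_device *dev = crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;
		u32 val;

		/* Read back what the firmware left in the primary plane regs. */
		val = I915_READ(DSPCNTR(crtc->plane));
		if (!(val & DISPLAY_PLANE_ENABLE))
			return;

		/* Assumed fields: just enough to locate the BIOS fb in stolen. */
		plane_config->tiled = val & DISPPLANE_TILED;
		plane_config->base = I915_READ(DSPADDR(crtc->plane));
	}

intel_find_plane_obj() then presumably turns that description into a framebuffer object attached to the crtc, which is what makes the smooth boot handoff possible.
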
  
  static void
@@@ -11550,21 -11142,11 +11564,21 @@@ static void intel_sanitize_encoder(stru
         * the crtc fixup. */
  }
  
 -void i915_redisable_vga(struct drm_device *dev)
 +void i915_redisable_vga_power_on(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 vga_reg = i915_vgacntrl_reg(dev);
  
 +      if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
 +              DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 +              i915_disable_vga(dev);
 +      }
 +}
 +
 +void i915_redisable_vga(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
        /* This function can be called both from intel_modeset_setup_hw_state or
         * at a very early point in our resume sequence, where the power well
        * structures are not yet restored. Since this function is at a very
        * paranoid "someone might have enabled VGA while we were not looking"
        * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses. */
 -      if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
 -          (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
 +      if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
                return;
  
 -      if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
 -              DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 -              i915_disable_vga(dev);
 -      }
 +      i915_redisable_vga_power_on(dev);
  }
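
Splitting i915_redisable_vga_power_on() out of i915_redisable_vga() lets a caller that already knows the relevant power well is up skip the intel_display_power_enabled() check. A hypothetical call site, purely for illustration:

	static void sketch_power_well_post_enable(struct drm_device *dev)
	{
		/* The well was just brought up, so the VGA plane is reachable. */
		i915_redisable_vga_power_on(dev);
	}
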
  
  static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@@ -11679,8 -11265,9 +11693,8 @@@ void intel_modeset_setup_hw_state(struc
         */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list,
                            base.head) {
 -              if (crtc->active && i915_fastboot) {
 -                      intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
 -
 +              if (crtc->active && i915.fastboot) {
 +                      intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
                        DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
                                      crtc->base.base.id);
                        drm_mode_debug_printmodeline(&crtc->base.mode);
  
  void intel_modeset_gem_init(struct drm_device *dev)
  {
 +      struct drm_crtc *c;
 +      struct intel_framebuffer *fb;
 +
        intel_modeset_init_hw(dev);
  
        intel_setup_overlay(dev);
  
 -      mutex_lock(&dev->mode_config.mutex);
 -      drm_mode_config_reset(dev);
 -      intel_modeset_setup_hw_state(dev, false);
 -      mutex_unlock(&dev->mode_config.mutex);
 +      /*
 +       * Make sure any fbs we allocated at startup are properly
 +       * pinned & fenced.  When we do the allocation it's too early
 +       * for this.
 +       */
 +      mutex_lock(&dev->struct_mutex);
 +      list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
 +              if (!c->fb)
 +                      continue;
 +
 +              fb = to_intel_framebuffer(c->fb);
 +              if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
 +                      DRM_ERROR("failed to pin boot fb on pipe %d\n",
 +                                to_intel_crtc(c)->pipe);
 +                      drm_framebuffer_unreference(c->fb);
 +                      c->fb = NULL;
 +              }
 +      }
 +      mutex_unlock(&dev->struct_mutex);
 +}
 +
 +void intel_connector_unregister(struct intel_connector *intel_connector)
 +{
 +      struct drm_connector *connector = &intel_connector->base;
 +
 +      intel_panel_destroy_backlight(connector);
 +      drm_sysfs_connector_remove(connector);
  }
  
  void intel_modeset_cleanup(struct drm_device *dev)
  
        /* destroy the backlight and sysfs files before encoders/connectors */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 -              intel_panel_destroy_backlight(connector);
 -              drm_sysfs_connector_remove(connector);
 +              struct intel_connector *intel_connector;
 +
 +              intel_connector = to_intel_connector(connector);
 +              intel_connector->unregister(intel_connector);
        }
  
        drm_mode_config_cleanup(dev);
@@@ -11853,24 -11412,12 +11867,24 @@@ int intel_modeset_vga_set_state(struct 
        unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
        u16 gmch_ctrl;
  
 -      pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
 +      if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
 +              DRM_ERROR("failed to read control word\n");
 +              return -EIO;
 +      }
 +
 +      if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
 +              return 0;
 +
        if (state)
                gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
        else
                gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
 -      pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
 +
 +      if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
 +              DRM_ERROR("failed to write control word\n");
 +              return -EIO;
 +      }
 +
        return 0;
  }
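
The early-out added above relies on !! to collapse the register bit to 0/1 before comparing it against the negated request. An equivalent spelled-out form (a sketch, not part of the patch):

	bool vga_disabled = !!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE);	/* 0 or 1 */
	bool want_enabled = state;

	/* Hardware already matches the request: skip the config write. */
	if (vga_disabled == !want_enabled)
		return 0;
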
  
@@@ -11942,8 -11489,7 +11956,8 @@@ intel_display_capture_error_state(struc
  
        for_each_pipe(i) {
                error->pipe[i].power_domain_on =
 -                      intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
 +                      intel_display_power_enabled_sw(dev_priv,
 +                                                     POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;
  
                enum transcoder cpu_transcoder = transcoders[i];
  
                error->transcoder[i].power_domain_on =
 -                      intel_display_power_enabled_sw(dev,
 +                      intel_display_power_enabled_sw(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;
@@@ -91,25 -91,18 +91,25 @@@ static struct intel_dp *intel_attached_
  }
  
  static void intel_dp_link_down(struct intel_dp *intel_dp);
 +static void edp_panel_vdd_on(struct intel_dp *intel_dp);
 +static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
  
  static int
  intel_dp_max_link_bw(struct intel_dp *intel_dp)
  {
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 +      struct drm_device *dev = intel_dp->attached_connector->base.dev;
  
        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
                break;
        case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
 -              max_link_bw = DP_LINK_BW_2_7;
 +              if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
 +                  intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
 +                      max_link_bw = DP_LINK_BW_5_4;
 +              else
 +                      max_link_bw = DP_LINK_BW_2_7;
                break;
        default:
                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@@ -301,7 -294,7 +301,7 @@@ static u32 _pp_stat_reg(struct intel_d
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
  }
  
 -static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 +static bool edp_have_panel_power(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
  }
  
 -static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 +static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -326,7 -319,7 +326,7 @@@ intel_dp_check_edp(struct intel_dp *int
        if (!is_edp(intel_dp))
                return;
  
 -      if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
 +      if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
@@@ -358,46 -351,31 +358,46 @@@ intel_dp_aux_wait_done(struct intel_dp 
        return status;
  }
  
 -static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
 -                                    int index)
 +static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      /* The clock divider is based off the hrawclk,
 -       * and would like to run at 2MHz. So, take the
 -       * hrawclk value and divide by 2 and use that
 -       *
 -       * Note that PCH attached eDP panels should use a 125MHz input
 -       * clock divider.
 +      /*
 +       * The clock divider is based on the hrawclk and should run at
 +       * 2 MHz.  So take the hrawclk value, divide by 2, and use that.
         */
 -      if (IS_VALLEYVIEW(dev)) {
 -              return index ? 0 : 100;
 -      } else if (intel_dig_port->port == PORT_A) {
 -              if (index)
 -                      return 0;
 -              if (HAS_DDI(dev))
 -                      return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
 -              else if (IS_GEN6(dev) || IS_GEN7(dev))
 +      return index ? 0 : intel_hrawclk(dev) / 2;
 +}
 +
 +static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 +{
 +      struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 +      struct drm_device *dev = intel_dig_port->base.base.dev;
 +
 +      if (index)
 +              return 0;
 +
 +      if (intel_dig_port->port == PORT_A) {
 +              if (IS_GEN6(dev) || IS_GEN7(dev))
                        return 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        return 225; /* eDP input clock at 450Mhz */
 +      } else {
 +              return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
 +      }
 +}
 +
 +static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 +{
 +      struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 +      struct drm_device *dev = intel_dig_port->base.base.dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +
 +      if (intel_dig_port->port == PORT_A) {
 +              if (index)
 +                      return 0;
 +              return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 1: return 72;
                default: return 0;
                }
 -      } else if (HAS_PCH_SPLIT(dev)) {
 +      } else {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
 -      } else {
 -              return index ? 0 :intel_hrawclk(dev) / 2;
        }
  }
  
 +static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 +{
 +      return index ? 0 : 100;
 +}
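
All of the per-platform dividers aim at the same 2 MHz AUX engine clock. A worked example with an assumed raw clock (illustrative numbers only, not from this patch):

	u32 hrawclk_mhz = 200;		/* assumption for the example */
	u32 divider = hrawclk_mhz / 2;	/* i9xx path: returns 100 */

	/* 200 MHz / 100 == 2 MHz, the target AUX clock.  The VLV hook
	 * hardcodes the same 100, presumably because its raw clock is
	 * fixed. */
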
 +
 +static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
 +                                    bool has_aux_irq,
 +                                    int send_bytes,
 +                                    uint32_t aux_clock_divider)
 +{
 +      struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 +      struct drm_device *dev = intel_dig_port->base.base.dev;
 +      uint32_t precharge, timeout;
 +
 +      if (IS_GEN6(dev))
 +              precharge = 3;
 +      else
 +              precharge = 5;
 +
 +      if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
 +              timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 +      else
 +              timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 +
 +      return DP_AUX_CH_CTL_SEND_BUSY |
 +             DP_AUX_CH_CTL_DONE |
 +             (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 +             DP_AUX_CH_CTL_TIME_OUT_ERROR |
 +             timeout |
 +             DP_AUX_CH_CTL_RECEIVE_ERROR |
 +             (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 +             (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 +             (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
 +}
 +
  static int
  intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
 -      int try, precharge, clock = 0;
 +      int try, clock = 0;
-       bool has_aux_irq = true;
+       bool has_aux_irq = HAS_AUX_IRQ(dev);
 -      uint32_t timeout;
  
        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
  
        intel_dp_check_edp(intel_dp);
  
 -      if (IS_GEN6(dev))
 -              precharge = 3;
 -      else
 -              precharge = 5;
 -
 -      if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
 -              timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 -      else
 -              timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 -
        intel_aux_display_runtime_get(dev_priv);
  
        /* Try to wait for any previous AUX channel activity */
                goto out;
        }
  
 -      while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
 +      while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
 +              u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
 +                                                        has_aux_irq,
 +                                                        send_bytes,
 +                                                        aux_clock_divider);
 +
                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                                           pack_aux(send + i, send_bytes - i));
  
                        /* Send the command and wait for it to complete */
 -                      I915_WRITE(ch_ctl,
 -                                 DP_AUX_CH_CTL_SEND_BUSY |
 -                                 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 -                                 timeout |
 -                                 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 -                                 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 -                                 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
 -                                 DP_AUX_CH_CTL_DONE |
 -                                 DP_AUX_CH_CTL_TIME_OUT_ERROR |
 -                                 DP_AUX_CH_CTL_RECEIVE_ERROR);
 +                      I915_WRITE(ch_ctl, send_ctl);
  
                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
  
@@@ -577,6 -537,7 +577,7 @@@ intel_dp_aux_native_write(struct intel_
        uint8_t msg[20];
        int msg_bytes;
        uint8_t ack;
+       int retry;
  
        if (WARN_ON(send_bytes > 16))
                return -E2BIG;
        msg[3] = send_bytes - 1;
        memcpy(&msg[4], send, send_bytes);
        msg_bytes = send_bytes + 4;
-       for (;;) {
+       for (retry = 0; retry < 7; retry++) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
                if (ret < 0)
                        return ret;
                ack >>= 4;
                if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
-                       break;
+                       return send_bytes;
                else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-                       udelay(100);
+                       usleep_range(400, 500);
                else
                        return -EIO;
        }
-       return send_bytes;
+       DRM_ERROR("too many retries, giving up\n");
+       return -EIO;
  }
  
  /* Write a single byte to the aux channel in native mode */
@@@ -622,6 -585,7 +625,7 @@@ intel_dp_aux_native_read(struct intel_d
        int reply_bytes;
        uint8_t ack;
        int ret;
+       int retry;
  
        if (WARN_ON(recv_bytes > 19))
                return -E2BIG;
        msg_bytes = 4;
        reply_bytes = recv_bytes + 1;
  
-       for (;;) {
+       for (retry = 0; retry < 7; retry++) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
                                      reply, reply_bytes);
                if (ret == 0)
                        return ret - 1;
                }
                else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
-                       udelay(100);
+                       usleep_range(400, 500);
                else
                        return -EIO;
        }
+       DRM_ERROR("too many retries, giving up\n");
+       return -EIO;
  }
  
  static int
@@@ -670,7 -637,7 +677,7 @@@ intel_dp_i2c_aux_ch(struct i2c_adapter 
        int reply_bytes;
        int ret;
  
 -      ironlake_edp_panel_vdd_on(intel_dp);
 +      edp_panel_vdd_on(intel_dp);
        intel_dp_check_edp(intel_dp);
        /* Set up the command byte */
        if (mode & MODE_I2C_READ)
        ret = -EREMOTEIO;
  
  out:
 -      ironlake_edp_panel_vdd_off(intel_dp, false);
 +      edp_panel_vdd_off(intel_dp, false);
        return ret;
  }
  
 +static void
 +intel_dp_connector_unregister(struct intel_connector *intel_connector)
 +{
 +      struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
 +
 +      sysfs_remove_link(&intel_connector->base.kdev->kobj,
 +                        intel_dp->adapter.dev.kobj.name);
 +      intel_connector_unregister(intel_connector);
 +}
 +
  static int
  intel_dp_i2c_init(struct intel_dp *intel_dp,
                  struct intel_connector *intel_connector, const char *name)
        strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
        intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
        intel_dp->adapter.algo_data = &intel_dp->algo;
 -      intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 +      intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;
  
        ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
 +      if (ret < 0)
 +              return ret;
 +
 +      ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
 +                              &intel_dp->adapter.dev.kobj,
 +                              intel_dp->adapter.dev.kobj.name);
 +
 +      if (ret < 0)
 +              i2c_del_adapter(&intel_dp->adapter);
 +
        return ret;
  }
  
@@@ -865,10 -812,9 +872,10 @@@ intel_dp_compute_config(struct intel_en
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 -      int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
 +      /* Conveniently, the link BW constants become indices with a shift... */
 +      int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
 -      static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 +      static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
        int link_avail, link_clock;
  
        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
  
 -              for (clock = 0; clock <= max_clock; clock++) {
 -                      for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 +              for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 +                      for (clock = 0; clock <= max_clock; clock++) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
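
The "indices with a shift" remark above works because the DP spec spaces the link-rate codes as 0x06, 0x0a and 0x14, so shifting right by three yields consecutive bws[] indices. A compile-time self-check sketch (to be placed inside any function in this file):

	BUILD_BUG_ON((DP_LINK_BW_1_62 >> 3) != 0);	/* 0x06 >> 3 */
	BUILD_BUG_ON((DP_LINK_BW_2_7 >> 3) != 1);	/* 0x0a >> 3 */
	BUILD_BUG_ON((DP_LINK_BW_5_4 >> 3) != 2);	/* 0x14 >> 3 */
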
@@@ -1069,16 -1015,16 +1076,16 @@@ static void intel_dp_mode_set(struct in
                ironlake_set_pll_cpu_edp(intel_dp);
  }
  
 -#define IDLE_ON_MASK          (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 -#define IDLE_ON_VALUE         (PP_ON | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
 +#define IDLE_ON_MASK          (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 +#define IDLE_ON_VALUE         (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
  
 -#define IDLE_OFF_MASK         (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
 -#define IDLE_OFF_VALUE                (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 +#define IDLE_OFF_MASK         (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
 +#define IDLE_OFF_VALUE                (0     | PP_SEQUENCE_NONE | 0                     | 0)
  
 -#define IDLE_CYCLE_MASK               (PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 -#define IDLE_CYCLE_VALUE      (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 +#define IDLE_CYCLE_MASK               (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 +#define IDLE_CYCLE_VALUE      (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
  
 -static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
 +static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
  {
        DRM_DEBUG_KMS("Wait complete\n");
  }
  
 -static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
 +static void wait_panel_on(struct intel_dp *intel_dp)
  {
        DRM_DEBUG_KMS("Wait for panel power on\n");
 -      ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 +      wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
  }
  
 -static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
 +static void wait_panel_off(struct intel_dp *intel_dp)
  {
        DRM_DEBUG_KMS("Wait for panel power off time\n");
 -      ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
 +      wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
  }
  
 -static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
 +static void wait_panel_power_cycle(struct intel_dp *intel_dp)
  {
        DRM_DEBUG_KMS("Wait for panel power cycle\n");
 -      ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
 +
 +      /* When we disable the VDD override bit last, we have to do the manual
 +       * wait. */
 +      wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
 +                                     intel_dp->panel_power_cycle_delay);
 +
 +      wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
  }
  
 +static void wait_backlight_on(struct intel_dp *intel_dp)
 +{
 +      wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
 +                                     intel_dp->backlight_on_delay);
 +}
 +
 +static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 +{
 +      wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
 +                                     intel_dp->backlight_off_delay);
 +}
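
Both helpers assume wait_remaining_ms_from_jiffies() sleeps only for whatever portion of the delay has not already elapsed since the recorded timestamp. A minimal sketch of that behaviour (the in-tree helper may differ in detail):

	static inline void sketch_wait_remaining_ms(unsigned long timestamp_jiffies,
						    int to_wait_ms)
	{
		unsigned long target = timestamp_jiffies + msecs_to_jiffies(to_wait_ms);

		/* Sleep only for the part of the delay still outstanding. */
		if (time_after(target, jiffies))
			msleep(jiffies_to_msecs(target - jiffies));
	}
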
  
  /* Read the current pp_control value, unlocking the register if it
   * is locked
@@@ -1155,7 -1084,7 +1162,7 @@@ static  u32 ironlake_get_pp_control(str
        return control;
  }
  
 -void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 +static void edp_panel_vdd_on(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        intel_dp->want_panel_vdd = true;
  
 -      if (ironlake_edp_have_panel_vdd(intel_dp))
 +      if (edp_have_panel_vdd(intel_dp))
                return;
  
        intel_runtime_pm_get(dev_priv);
  
        DRM_DEBUG_KMS("Turning eDP VDD on\n");
  
 -      if (!ironlake_edp_have_panel_power(intel_dp))
 -              ironlake_wait_panel_power_cycle(intel_dp);
 +      if (!edp_have_panel_power(intel_dp))
 +              wait_panel_power_cycle(intel_dp);
  
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
 -      if (!ironlake_edp_have_panel_power(intel_dp)) {
 +      if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP was not running\n");
                msleep(intel_dp->panel_power_up_delay);
        }
  }
  
 -static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 +static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
  
 -      if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
 +      if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
                DRM_DEBUG_KMS("Turning eDP VDD off\n");
  
                pp = ironlake_get_pp_control(intel_dp);
                I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
  
                if ((pp & POWER_TARGET_ON) == 0)
 -                      msleep(intel_dp->panel_power_cycle_delay);
 +                      intel_dp->last_power_cycle = jiffies;
  
                intel_runtime_pm_put(dev_priv);
        }
  }
  
 -static void ironlake_panel_vdd_work(struct work_struct *__work)
 +static void edp_panel_vdd_work(struct work_struct *__work)
  {
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
  
        mutex_lock(&dev->mode_config.mutex);
 -      ironlake_panel_vdd_off_sync(intel_dp);
 +      edp_panel_vdd_off_sync(intel_dp);
        mutex_unlock(&dev->mode_config.mutex);
  }
  
 -void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 +static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
  {
        if (!is_edp(intel_dp))
                return;
        intel_dp->want_panel_vdd = false;
  
        if (sync) {
 -              ironlake_panel_vdd_off_sync(intel_dp);
 +              edp_panel_vdd_off_sync(intel_dp);
        } else {
                /*
                 * Queue the timer to fire a long
        }
  }
  
 -void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 +void intel_edp_panel_on(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        DRM_DEBUG_KMS("Turn eDP power on\n");
  
 -      if (ironlake_edp_have_panel_power(intel_dp)) {
 +      if (edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
                return;
        }
  
 -      ironlake_wait_panel_power_cycle(intel_dp);
 +      wait_panel_power_cycle(intel_dp);
  
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
  
 -      ironlake_wait_panel_on(intel_dp);
 +      wait_panel_on(intel_dp);
 +      intel_dp->last_power_on = jiffies;
  
        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
        }
  }
  
 -void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 +void intel_edp_panel_off(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        DRM_DEBUG_KMS("Turn eDP power off\n");
  
 +      edp_wait_backlight_off(intel_dp);
 +
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
 -      pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 +      pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
 +              EDP_BLC_ENABLE);
  
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
  
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
  
 -      ironlake_wait_panel_off(intel_dp);
 +      intel_dp->last_power_cycle = jiffies;
 +      wait_panel_off(intel_dp);
  }
  
 -void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 +void intel_edp_backlight_on(struct intel_dp *intel_dp)
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
 -      msleep(intel_dp->backlight_on_delay);
 +      wait_backlight_on(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;
  
        intel_panel_enable_backlight(intel_dp->attached_connector);
  }
  
 -void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 +void intel_edp_backlight_off(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 -      msleep(intel_dp->backlight_off_delay);
 +      intel_dp->last_backlight_off = jiffies;
  }
  
  static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@@ -1479,14 -1403,7 +1486,14 @@@ static bool intel_dp_get_hw_state(struc
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      u32 tmp = I915_READ(intel_dp->output_reg);
 +      enum intel_display_power_domain power_domain;
 +      u32 tmp;
 +
 +      power_domain = intel_display_port_power_domain(encoder);
 +      if (!intel_display_power_enabled(dev_priv, power_domain))
 +              return false;
 +
 +      tmp = I915_READ(intel_dp->output_reg);
  
        if (!(tmp & DP_PORT_EN))
                return false;
@@@ -1680,12 -1597,10 +1687,12 @@@ static void intel_edp_psr_enable_sink(s
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
 +      uint32_t aux_clock_divider;
        int precharge = 0x3;
        int msg_size = 5;       /* Header(4) + Message(1) */
  
 +      aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 +
        /* Enable PSR in sink */
        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
                intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
@@@ -1753,7 -1668,7 +1760,7 @@@ static bool intel_edp_psr_match_conditi
                return false;
        }
  
 -      if (!i915_enable_psr) {
 +      if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disable by flag\n");
                return false;
        }
@@@ -1869,11 -1784,9 +1876,11 @@@ static void intel_disable_dp(struct int
  
        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
 -      ironlake_edp_backlight_off(intel_dp);
 +      edp_panel_vdd_on(intel_dp);
 +      intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 -      ironlake_edp_panel_off(intel_dp);
 +      intel_edp_panel_off(intel_dp);
 +      edp_panel_vdd_off(intel_dp, true);
  
       /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
        if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@@ -1903,11 -1816,11 +1910,11 @@@ static void intel_enable_dp(struct inte
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;
  
 -      ironlake_edp_panel_vdd_on(intel_dp);
 +      edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
 -      ironlake_edp_panel_on(intel_dp);
 -      ironlake_edp_panel_vdd_off(intel_dp, true);
 +      intel_edp_panel_on(intel_dp);
 +      edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
  }
@@@ -1917,14 -1830,14 +1924,14 @@@ static void g4x_enable_dp(struct intel_
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  
        intel_enable_dp(encoder);
 -      ironlake_edp_backlight_on(intel_dp);
 +      intel_edp_backlight_on(intel_dp);
  }
  
  static void vlv_enable_dp(struct intel_encoder *encoder)
  {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  
 -      ironlake_edp_backlight_on(intel_dp);
 +      intel_edp_backlight_on(intel_dp);
  }
  
  static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@@ -1963,10 -1876,12 +1970,12 @@@ static void vlv_pre_enable_dp(struct in
  
        mutex_unlock(&dev_priv->dpio_lock);
  
-       /* init power sequencer on this pipe and port */
-       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                     &power_seq);
+       if (is_edp(intel_dp)) {
+               /* init power sequencer on this pipe and port */
+               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                             &power_seq);
+       }
  
        intel_enable_dp(encoder);
  
@@@ -2715,15 -2630,10 +2724,15 @@@ intel_dp_complete_link_train(struct int
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
 +      uint32_t training_pattern = DP_TRAINING_PATTERN_2;
 +
 +      /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
 +      if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
 +              training_pattern = DP_TRAINING_PATTERN_3;
  
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp, &DP,
 -                                   DP_TRAINING_PATTERN_2 |
 +                                   training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
                return;
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
 -                                              DP_TRAINING_PATTERN_2 |
 +                                              training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        cr_tries++;
                        continue;
                        intel_dp_link_down(intel_dp);
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
 -                                              DP_TRAINING_PATTERN_2 |
 +                                              training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        tries = 0;
                        cr_tries++;
@@@ -2908,14 -2818,6 +2917,14 @@@ intel_dp_get_dpcd(struct intel_dp *inte
                }
        }
  
 +      /* Training Pattern 3 support */
 +      if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
 +          intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
 +              intel_dp->use_tps3 = true;
 +              DRM_DEBUG_KMS("Displayport TPS3 supported");
 +      } else
 +              intel_dp->use_tps3 = false;
 +
        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */
@@@ -2939,7 -2841,7 +2948,7 @@@ intel_dp_probe_oui(struct intel_dp *int
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
  
 -      ironlake_edp_panel_vdd_on(intel_dp);
 +      edp_panel_vdd_on(intel_dp);
  
        if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
  
 -      ironlake_edp_panel_vdd_off(intel_dp, false);
 +      edp_panel_vdd_off(intel_dp, false);
 +}
 +
 +int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 +{
 +      struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 +      struct drm_device *dev = intel_dig_port->base.base.dev;
 +      struct intel_crtc *intel_crtc =
 +              to_intel_crtc(intel_dig_port->base.base.crtc);
 +      u8 buf[1];
 +
 +      if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
 +              return -EAGAIN;
 +
 +      if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
 +              return -ENOTTY;
 +
 +      if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
 +                                       DP_TEST_SINK_START))
 +              return -EAGAIN;
 +
 +      /* Wait 2 vblanks to be sure we will have the correct CRC value */
 +      intel_wait_for_vblank(dev, intel_crtc->pipe);
 +      intel_wait_for_vblank(dev, intel_crtc->pipe);
 +
 +      if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
 +              return -EAGAIN;
 +
 +      intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
 +      return 0;
  }
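
A hypothetical caller of the new intel_dp_sink_crc() (the expected consumer is a debugfs test hook; this sketch only shows the calling convention):

	static void sketch_dump_sink_crc(struct intel_dp *intel_dp)
	{
		u8 crc[6];

		if (intel_dp_sink_crc(intel_dp, crc) == 0)
			DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
				      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
	}
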
  
  static bool
@@@ -3225,14 -3098,10 +3234,14 @@@ intel_dp_detect(struct drm_connector *c
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum drm_connector_status status;
 +      enum intel_display_power_domain power_domain;
        struct edid *edid = NULL;
  
        intel_runtime_pm_get(dev_priv);
  
 +      power_domain = intel_display_port_power_domain(intel_encoder);
 +      intel_display_power_get(dev_priv, power_domain);
 +
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector));
  
        status = connector_status_connected;
  
  out:
 +      intel_display_power_put(dev_priv, power_domain);
 +
        intel_runtime_pm_put(dev_priv);
 +
        return status;
  }
  
  static int intel_dp_get_modes(struct drm_connector *connector)
  {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
 +      struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 +      struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      enum intel_display_power_domain power_domain;
        int ret;
  
        /* We should parse the EDID data and find out if it has an audio sink
         */
  
 +      power_domain = intel_display_port_power_domain(intel_encoder);
 +      intel_display_power_get(dev_priv, power_domain);
 +
        ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
 +      intel_display_power_put(dev_priv, power_domain);
        if (ret)
                return ret;
  
@@@ -3309,25 -3167,15 +3318,25 @@@ static boo
  intel_dp_detect_audio(struct drm_connector *connector)
  {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
 +      struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 +      struct intel_encoder *intel_encoder = &intel_dig_port->base;
 +      struct drm_device *dev = connector->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      enum intel_display_power_domain power_domain;
        struct edid *edid;
        bool has_audio = false;
  
 +      power_domain = intel_display_port_power_domain(intel_encoder);
 +      intel_display_power_get(dev_priv, power_domain);
 +
        edid = intel_dp_get_edid(connector, &intel_dp->adapter);
        if (edid) {
                has_audio = drm_detect_monitor_audio(edid);
                kfree(edid);
        }
  
 +      intel_display_power_put(dev_priv, power_domain);
 +
        return has_audio;
  }
  
@@@ -3447,7 -3295,7 +3456,7 @@@ void intel_dp_encoder_destroy(struct dr
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                mutex_lock(&dev->mode_config.mutex);
 -              ironlake_panel_vdd_off_sync(intel_dp);
 +              edp_panel_vdd_off_sync(intel_dp);
                mutex_unlock(&dev->mode_config.mutex);
        }
        kfree(intel_dig_port);
@@@ -3546,13 -3394,6 +3555,13 @@@ intel_dp_add_properties(struct intel_d
        }
  }
  
 +static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 +{
 +      intel_dp->last_power_cycle = jiffies;
 +      intel_dp->last_power_on = jiffies;
 +      intel_dp->last_backlight_off = jiffies;
 +}
 +
  static void
  intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp,
@@@ -3675,17 -3516,10 +3684,17 @@@ intel_dp_init_panel_power_sequencer_reg
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }
  
 -      /* And finally store the new values in the power sequencer. */
 +      /*
 +       * And finally store the new values in the power sequencer. The
 +       * backlight delays are set to 1 because we do manual waits on them. For
 +       * T8, even BSpec recommends doing it. For T9, if we don't do this,
 +       * we'll end up waiting for the backlight off delay twice: once when we
 +       * do the manual sleep, and once when we disable the panel and wait for
 +       * the PP_STATUS bit to become zero.
 +       */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
 -              (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
 -      pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
 +              (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
 +      pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
  }
  
  static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 -                                   struct intel_connector *intel_connector)
 +                                   struct intel_connector *intel_connector,
 +                                   struct edp_power_seq *power_seq)
  {
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
 -      struct edp_power_seq power_seq = { 0 };
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        if (!is_edp(intel_dp))
                return true;
  
 -      intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 -
        /* Cache DPCD and EDID for edp. */
 -      ironlake_edp_panel_vdd_on(intel_dp);
 +      edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
 -      ironlake_edp_panel_vdd_off(intel_dp, false);
 +      edp_panel_vdd_off(intel_dp, false);
  
        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
        }
  
        /* We now know it's not a ghost, init power sequence regs. */
 -      intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
 -                                                    &power_seq);
 +      intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
  
        edid = drm_get_edid(connector, &intel_dp->adapter);
        if (edid) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
        }
  
 -      intel_panel_init(&intel_connector->panel, fixed_mode);
 +      intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
        intel_panel_setup_backlight(connector);
  
        return true;
@@@ -3801,22 -3638,9 +3810,22 @@@ intel_dp_init_connector(struct intel_di
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
 +      struct edp_power_seq power_seq = { 0 };
        const char *name = NULL;
        int type, error;
  
 +      /* intel_dp vfuncs */
 +      if (IS_VALLEYVIEW(dev))
 +              intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
 +      else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 +              intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
 +      else if (HAS_PCH_SPLIT(dev))
 +              intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
 +      else
 +              intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
 +
 +      intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 +
        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;
        connector->doublescan_allowed = 0;
  
        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
 -                        ironlake_panel_vdd_work);
 +                        edp_panel_vdd_work);
  
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
 +      intel_connector->unregister = intel_dp_connector_unregister;
  
        intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
        if (HAS_DDI(dev)) {
                BUG();
        }
  
 +      if (is_edp(intel_dp)) {
 +              intel_dp_init_panel_power_timestamps(intel_dp);
 +              intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 +      }
 +
        error = intel_dp_i2c_init(intel_dp, intel_connector, name);
        WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
             error, port_name(port));
  
        intel_dp->psr_setup_done = false;
  
 -      if (!intel_edp_init_connector(intel_dp, intel_connector)) {
 +      if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
                i2c_del_adapter(&intel_dp->adapter);
                if (is_edp(intel_dp)) {
                        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                        mutex_lock(&dev->mode_config.mutex);
 -                      ironlake_panel_vdd_off_sync(intel_dp);
 +                      edp_panel_vdd_off_sync(intel_dp);
                        mutex_unlock(&dev->mode_config.mutex);
                }
                drm_sysfs_connector_remove(connector);
@@@ -3980,7 -3798,7 +3989,7 @@@ intel_dp_init(struct drm_device *dev, i
  
        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 -      intel_encoder->cloneable = false;
 +      intel_encoder->cloneable = 0;
        intel_encoder->hot_plug = intel_dp_hot_plug;
  
        if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
@@@ -113,8 -113,7 +113,8 @@@ static u32 hsw_infoframe_enable(enum hd
  }
  
  static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
 -                                enum transcoder cpu_transcoder)
 +                                enum transcoder cpu_transcoder,
 +                                struct drm_i915_private *dev_priv)
  {
        switch (type) {
        case HDMI_INFOFRAME_TYPE_AVI:
@@@ -297,8 -296,7 +297,8 @@@ static void hsw_write_infoframe(struct 
        u32 val = I915_READ(ctl_reg);
  
        data_reg = hsw_infoframe_data_reg(type,
 -                                        intel_crtc->config.cpu_transcoder);
 +                                        intel_crtc->config.cpu_transcoder,
 +                                        dev_priv);
        if (data_reg == 0)
                return;
  
@@@ -425,7 -423,7 +425,7 @@@ static void g4x_set_infoframes(struct d
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        u32 reg = VIDEO_DIP_CTL;
        u32 val = I915_READ(reg);
 -      u32 port;
 +      u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
  
        assert_hdmi_port_disabled(intel_hdmi);
  
                return;
        }
  
 -      switch (intel_dig_port->port) {
 -      case PORT_B:
 -              port = VIDEO_DIP_PORT_B;
 -              break;
 -      case PORT_C:
 -              port = VIDEO_DIP_PORT_C;
 -              break;
 -      default:
 -              BUG();
 -              return;
 -      }
 -
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
                if (val & VIDEO_DIP_ENABLE) {
                        val &= ~VIDEO_DIP_ENABLE;
@@@ -479,7 -489,7 +479,7 @@@ static void ibx_set_infoframes(struct d
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 -      u32 port;
 +      u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
  
        assert_hdmi_port_disabled(intel_hdmi);
  
                return;
        }
  
 -      switch (intel_dig_port->port) {
 -      case PORT_B:
 -              port = VIDEO_DIP_PORT_B;
 -              break;
 -      case PORT_C:
 -              port = VIDEO_DIP_PORT_C;
 -              break;
 -      case PORT_D:
 -              port = VIDEO_DIP_PORT_D;
 -              break;
 -      default:
 -              BUG();
 -              return;
 -      }
 -
        if (port != (val & VIDEO_DIP_PORT_MASK)) {
                if (val & VIDEO_DIP_ENABLE) {
                        val &= ~VIDEO_DIP_ENABLE;
@@@ -667,13 -692,8 +667,13 @@@ static bool intel_hdmi_get_hw_state(str
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 +      enum intel_display_power_domain power_domain;
        u32 tmp;
  
 +      power_domain = intel_display_port_power_domain(encoder);
 +      if (!intel_display_power_enabled(dev_priv, power_domain))
 +              return false;
 +
        tmp = I915_READ(intel_hdmi->hdmi_reg);
  
        if (!(tmp & SDVO_ENABLE))
@@@ -825,7 -845,7 +825,7 @@@ static int hdmi_portclock_limit(struct 
  {
        struct drm_device *dev = intel_hdmi_to_dev(hdmi);
  
-       if (IS_G4X(dev))
+       if (!hdmi->has_hdmi_sink || IS_G4X(dev))
                return 165000;
        else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
                return 300000;
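
With a sink that lacks HDMI capability the port clock is now capped at the 165 MHz single-link TMDS limit on every platform. A sketch of the mode filtering this supports (illustrative only; mirrors what a mode_valid hook would do):

	static enum drm_mode_status
	sketch_hdmi_mode_valid(struct intel_hdmi *hdmi, struct drm_display_mode *mode)
	{
		if (mode->clock > hdmi_portclock_limit(hdmi))
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}
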
@@@ -879,8 -899,8 +879,8 @@@ bool intel_hdmi_compute_config(struct i
         * outputs. We also need to check that the higher clock still fits
         * within limits.
         */
-       if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
-           && HAS_PCH_SPLIT(dev)) {
+       if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+           clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
                DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                desired_bpp = 12*3;
  
@@@ -914,15 -934,11 +914,15 @@@ intel_hdmi_detect(struct drm_connector 
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edid *edid;
 +      enum intel_display_power_domain power_domain;
        enum drm_connector_status status = connector_status_disconnected;
  
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector));
  
 +      power_domain = intel_display_port_power_domain(intel_encoder);
 +      intel_display_power_get(dev_priv, power_domain);
 +
        intel_hdmi->has_hdmi_sink = false;
        intel_hdmi->has_audio = false;
        intel_hdmi->rgb_quant_range_selectable = false;
                intel_encoder->type = INTEL_OUTPUT_HDMI;
        }
  
 +      intel_display_power_put(dev_priv, power_domain);
 +
        return status;
  }
  
  static int intel_hdmi_get_modes(struct drm_connector *connector)
  {
 -      struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 +      struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
 +      struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
 +      enum intel_display_power_domain power_domain;
 +      int ret;
  
        /* We should parse the EDID data and find out if it's an HDMI sink so
         * we can send audio to it.
         */
  
 -      return intel_ddc_get_modes(connector,
 +      power_domain = intel_display_port_power_domain(intel_encoder);
 +      intel_display_power_get(dev_priv, power_domain);
 +
 +      ret = intel_ddc_get_modes(connector,
                                   intel_gmbus_get_adapter(dev_priv,
                                                           intel_hdmi->ddc_bus));
 +
 +      intel_display_power_put(dev_priv, power_domain);
 +
 +      return ret;
  }
  
  static bool
  intel_hdmi_detect_audio(struct drm_connector *connector)
  {
 -      struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 +      struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
 +      struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
 +      enum intel_display_power_domain power_domain;
        struct edid *edid;
        bool has_audio = false;
  
 +      power_domain = intel_display_port_power_domain(intel_encoder);
 +      intel_display_power_get(dev_priv, power_domain);
 +
        edid = drm_get_edid(connector,
                            intel_gmbus_get_adapter(dev_priv,
                                                    intel_hdmi->ddc_bus));
                kfree(edid);
        }
  
 +      intel_display_power_put(dev_priv, power_domain);
 +
        return has_audio;
  }
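detect, get_modes, and detect_audio all gain the same bracketing discipline: take a runtime reference on the port's power domain before any DDC/EDID access and drop it on the way out. That is also why get_modes now stores its result in ret instead of returning directly — every exit path has to pass through intel_display_power_put(). The pattern, using only the calls visible in the hunks above:

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);
	/* ... hardware access that requires the well to be up ... */
	intel_display_power_put(dev_priv, power_domain);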
  
@@@ -1264,7 -1261,6 +1264,7 @@@ void intel_hdmi_init_connector(struct i
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
 +      intel_connector->unregister = intel_connector_unregister;
  
        intel_hdmi_add_properties(intel_hdmi, connector);
  
@@@ -1318,7 -1314,7 +1318,7 @@@ void intel_hdmi_init(struct drm_device 
  
        intel_encoder->type = INTEL_OUTPUT_HDMI;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 -      intel_encoder->cloneable = false;
 +      intel_encoder->cloneable = 0;
  
        intel_dig_port->port = port;
        intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
@@@ -33,6 -33,8 +33,6 @@@
  #include <linux/moduleparam.h>
  #include "intel_drv.h"
  
 -#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 -
  void
  intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
                       struct drm_display_mode *adjusted_mode)
@@@ -323,6 -325,13 +323,6 @@@ out
        pipe_config->gmch_pfit.lvds_border_bits = border;
  }
  
 -static int i915_panel_invert_brightness;
 -MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
 -      "(-1 force normal, 0 machine defaults, 1 force inversion), please "
 -      "report PCI device ID, subsystem vendor and subsystem device ID "
 -      "to dri-devel@lists.freedesktop.org, if your machine needs it. "
 -      "It will then be included in an upcoming module version.");
 -module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
  static u32 intel_panel_compute_brightness(struct intel_connector *connector,
                                          u32 val)
  {
  
        WARN_ON(panel->backlight.max == 0);
  
 -      if (i915_panel_invert_brightness < 0)
 +      if (i915.invert_brightness < 0)
                return val;
  
 -      if (i915_panel_invert_brightness > 0 ||
 +      if (i915.invert_brightness > 0 ||
            dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
                return panel->backlight.max - val;
        }
@@@ -689,7 -698,7 +689,7 @@@ static void i9xx_enable_backlight(struc
                freq /= 0xff;
  
        ctl = freq << 17;
-       if (IS_GEN2(dev) && panel->backlight.combination_mode)
+       if (panel->backlight.combination_mode)
                ctl |= BLM_LEGACY_MODE;
        if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
                ctl |= BLM_POLARITY_PNV;
@@@ -801,13 -810,13 +801,13 @@@ intel_panel_detect(struct drm_device *d
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        /* Assume that the BIOS does not lie through the OpRegion... */
 -      if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
 +      if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
                return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
                        connector_status_connected :
                        connector_status_disconnected;
        }
  
 -      switch (i915_panel_ignore_lid) {
 +      switch (i915.panel_ignore_lid) {
        case -2:
                return connector_status_connected;
        case -1:
@@@ -970,7 -979,7 +970,7 @@@ static int i9xx_setup_backlight(struct 
  
        ctl = I915_READ(BLC_PWM_CTL);
  
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
                panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
  
        if (IS_PINEVIEW(dev))
@@@ -1190,11 -1199,9 +1190,11 @@@ void intel_panel_init_backlight_funcs(s
  }
  
  int intel_panel_init(struct intel_panel *panel,
 -                   struct drm_display_mode *fixed_mode)
 +                   struct drm_display_mode *fixed_mode,
 +                   struct drm_display_mode *downclock_mode)
  {
        panel->fixed_mode = fixed_mode;
 +      panel->downclock_mode = downclock_mode;
  
        return 0;
  }
@@@ -97,7 -97,7 +97,7 @@@ static void i8xx_enable_fbc(struct drm_
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
 -      int plane, i;
 +      int i;
        u32 fbc_ctl;
  
        cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
                cfb_pitch = (cfb_pitch / 32) - 1;
        else
                cfb_pitch = (cfb_pitch / 64) - 1;
 -      plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
  
        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
  
                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
 -              fbc_ctl2 |= plane;
 +              fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
                I915_WRITE(FBC_FENCE_OFF, crtc->y);
        }
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
  
 -      DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c",
 +      DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
                      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
  }
  
@@@ -153,19 -154,17 +153,19 @@@ static void g4x_enable_fbc(struct drm_c
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        u32 dpfc_ctl;
  
 -      dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
 +      dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
 +      if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
 +              dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 +      else
 +              dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 -      I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
  
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
  
        /* enable it... */
 -      I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
 +      I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
  
        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
  }
@@@ -225,16 -224,18 +225,16 @@@ static void ironlake_enable_fbc(struct 
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 -      int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        u32 dpfc_ctl;
  
 -      dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 -      dpfc_ctl &= DPFC_RESERVED;
 -      dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
 -      /* Set persistent mode for front-buffer rendering, ala X. */
 -      dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
 +      dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
 +      if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
 +              dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 +      else
 +              dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev))
                dpfc_ctl |= obj->fence_reg;
 -      I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
  
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
@@@ -281,27 -282,20 +281,27 @@@ static void gen7_enable_fbc(struct drm_
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      u32 dpfc_ctl;
  
 -      I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
 +      dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
 +      if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
 +              dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 +      else
 +              dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 +      dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
  
 -      I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
 -                 IVB_DPFC_CTL_FENCE_EN |
 -                 intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
 +      I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
  
        if (IS_IVYBRIDGE(dev)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
 -              I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
 +              I915_WRITE(ILK_DISPLAY_CHICKEN1,
 +                         I915_READ(ILK_DISPLAY_CHICKEN1) |
 +                         ILK_FBCQ_DIS);
        } else {
 -              /* WaFbcAsynchFlipDisableFbcQueue:hsw */
 -              I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
 -                         HSW_BYPASS_FBC_QUEUE);
 +              /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
 +              I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
 +                         I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
 +                         HSW_FBCQ_DIS);
        }
  
        I915_WRITE(SNB_DPFC_CTL_SA,
@@@ -472,7 -466,7 +472,7 @@@ void intel_update_fbc(struct drm_devic
                return;
        }
  
 -      if (!i915_powersave) {
 +      if (!i915.powersave) {
                if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
                        DRM_DEBUG_KMS("fbc disabled per module param\n");
                return;
        obj = intel_fb->obj;
        adjusted_mode = &intel_crtc->config.adjusted_mode;
  
 -      if (i915_enable_fbc < 0 &&
 +      if (i915.enable_fbc < 0 &&
            INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
                if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
                        DRM_DEBUG_KMS("disabled per chip default\n");
                goto out_disable;
        }
 -      if (!i915_enable_fbc) {
 +      if (!i915.enable_fbc) {
                if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
                        DRM_DEBUG_KMS("fbc disabled per module param\n");
                goto out_disable;
                        DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                goto out_disable;
        }
 -      if ((INTEL_INFO(dev)->gen < 4 || IS_HASWELL(dev)) &&
 +      if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
            intel_crtc->plane != PLANE_A) {
                if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
                        DRM_DEBUG_KMS("plane not A, disabling compression\n");
@@@ -1134,7 -1128,7 +1134,7 @@@ static bool g4x_compute_wm0(struct drm_
                *plane_wm = display->max_wm;
  
        /* Use the large buffer method to calculate cursor watermark */
 -      line_time_us = ((htotal * 1000) / clock);
 +      line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
        entries = line_count * 64 * pixel_size;
        tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
@@@ -1210,7 -1204,7 +1210,7 @@@ static bool g4x_compute_srwm(struct drm
        hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
  
 -      line_time_us = (htotal * 1000) / clock;
 +      line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = hdisplay * pixel_size;
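These g4x watermark hunks (and the i965/i9xx ones below) clamp line_time_us to at least 1 before it is used as a divisor. A worked example with illustrative numbers: htotal = 2200 and clock = 148500 (kHz) give 2200 * 1000 / 148500 = 14 us per line; but if the pixel clock ever exceeded htotal * 1000, integer truncation would yield 0 and the subsequent

	line_count = (latency_ns / line_time_us + 1000) / 1000;

would divide by zero. The max(..., 1) guarantees a non-zero divisor, at the cost of some accuracy for such extreme clocks.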
  
@@@ -1443,7 -1437,7 +1443,7 @@@ static void i965_update_wm(struct drm_c
                unsigned long line_time_us;
                int entries;
  
 -              line_time_us = ((htotal * 1000) / clock);
 +              line_time_us = max(htotal * 1000 / clock, 1);
  
                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@@ -1569,7 -1563,7 +1569,7 @@@ static void i9xx_update_wm(struct drm_c
                unsigned long line_time_us;
                int entries;
  
 -              line_time_us = (htotal * 1000) / clock;
 +              line_time_us = max(htotal * 1000 / clock, 1);
  
                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
@@@ -1892,7 -1886,7 +1892,7 @@@ static unsigned int ilk_cursor_wm_max(c
  }
  
  /* Calculate the maximum FBC watermark */
 -static unsigned int ilk_fbc_wm_max(struct drm_device *dev)
 +static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
  {
        /* max that registers can hold */
        if (INTEL_INFO(dev)->gen >= 8)
                return 15;
  }
  
 -static void ilk_compute_wm_maximums(struct drm_device *dev,
 +static void ilk_compute_wm_maximums(const struct drm_device *dev,
                                    int level,
                                    const struct intel_wm_config *config,
                                    enum intel_ddb_partitioning ddb_partitioning,
@@@ -1954,7 -1948,7 +1954,7 @@@ static bool ilk_validate_wm_level(int l
        return ret;
  }
  
 -static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 +static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
                                 int level,
                                 const struct ilk_pipe_wm_parameters *p,
                                 struct intel_wm_level *result)
@@@ -2146,7 -2140,7 +2146,7 @@@ static bool intel_compute_pipe_wm(struc
                                  struct intel_pipe_wm *pipe_wm)
  {
        struct drm_device *dev = crtc->dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      const struct drm_i915_private *dev_priv = dev->dev_private;
        int level, max_level = ilk_wm_max_level(dev);
        /* LP0 watermark maximums depend on this pipe alone */
        struct intel_wm_config config = {
@@@ -2744,7 -2738,7 +2744,7 @@@ intel_alloc_context_page(struct drm_dev
                return NULL;
        }
  
 -      ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
 +      ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
        if (ret) {
                DRM_ERROR("failed to pin power context: %d\n", ret);
                goto err_unref;
        return ctx;
  
  err_unpin:
 -      i915_gem_object_unpin(ctx);
 +      i915_gem_object_ggtt_unpin(ctx);
  err_unref:
        drm_gem_object_unreference(&ctx->base);
        return NULL;
@@@ -3006,9 -3000,6 +3006,9 @@@ static void gen6_set_rps_thresholds(str
        dev_priv->rps.last_adj = 0;
  }
  
 +/* gen6_set_rps is called to update the frequency request, but should also be
 + * called when the range (min_delay and max_delay) is modified so that we can
 + * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
  void gen6_set_rps(struct drm_device *dev, u8 val)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        WARN_ON(val > dev_priv->rps.max_delay);
        WARN_ON(val < dev_priv->rps.min_delay);
  
 -      if (val == dev_priv->rps.cur_delay)
 +      if (val == dev_priv->rps.cur_delay) {
 +              /* min/max delay may still have been modified so be sure to
 +               * write the limits value */
 +              I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
 +                         gen6_rps_limits(dev_priv, val));
 +
                return;
 +      }
  
        gen6_set_rps_thresholds(dev_priv, val);
  
        trace_intel_gpu_freq_change(val * 50);
  }
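Throughout the RPS hunks, delay values are in units of 50 MHz — that is what the tracepoint's val * 50 converts. Illustration (values assumed for the example only): val = 18 encodes 18 * 50 = 900 MHz. The hw_max/hw_min fields extracted from RP_STATE_CAP below use the same encoding, per the "In units of 50MHz" comment there.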
  
 +/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 + *
 + * If Gfx is idle, then:
 + * 1. Mask Turbo interrupts
 + * 2. Bring up Gfx clock
 + * 3. Change the freq to Rpn and wait till P-Unit updates freq
 + * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down again
 + * 5. Unmask Turbo interrupts
 + */
 +static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 +{
 +      /*
 +       * When we are idle, drop to the minimum voltage state.
 +       */
 +
 +      if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay)
 +              return;
 +
 +      /* Mask turbo interrupts so that they do not fire in between */
 +      I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 +
 +      /* Bring up the Gfx clock */
 +      I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
 +              I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
 +                              VLV_GFX_CLK_FORCE_ON_BIT);
 +
 +      if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
 +                     I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
 +              DRM_ERROR("GFX_CLK_ON request timed out\n");
 +              return;
 +      }
 +
 +      dev_priv->rps.cur_delay = dev_priv->rps.min_delay;
 +
 +      vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
 +                                      dev_priv->rps.min_delay);
 +
 +      if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
 +                              & GENFREQSTATUS) == 0, 5))
 +              DRM_ERROR("timed out waiting for Punit\n");
 +
 +      /* Release the Gfx clock */
 +      I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
 +              I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
 +                              ~VLV_GFX_CLK_FORCE_ON_BIT);
 +
 +      /* Unmask Up interrupts */
 +      dev_priv->rps.rp_up_masked = true;
 +      gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
 +                                              dev_priv->rps.min_delay);
 +}
 +
  void gen6_rps_idle(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = dev_priv->dev;
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
                if (IS_VALLEYVIEW(dev))
 -                      valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 +                      vlv_set_rps_idle(dev_priv);
                else
                        gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
                dev_priv->rps.last_adj = 0;
@@@ -3199,10 -3132,16 +3199,10 @@@ static void valleyview_disable_rps(stru
  
  static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
  {
 -      if (IS_GEN6(dev))
 -              DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
 -
 -      if (IS_HASWELL(dev))
 -              DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
 -
        DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
 -                      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
 -                      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
 -                      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 +               (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
 +               (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
 +               (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
  }
  
  int intel_enable_rc6(const struct drm_device *dev)
                return 0;
  
        /* Respect the kernel parameter if it is set */
 -      if (i915_enable_rc6 >= 0)
 -              return i915_enable_rc6;
 +      if (i915.enable_rc6 >= 0)
 +              return i915.enable_rc6;
  
        /* Disable RC6 on Ironlake */
        if (INTEL_INFO(dev)->gen == 5)
                return 0;
  
 -      if (IS_HASWELL(dev))
 -              return INTEL_RC6_ENABLE;
 -
 -      /* snb/ivb have more than one rc6 state. */
 -      if (INTEL_INFO(dev)->gen == 6)
 -              return INTEL_RC6_ENABLE;
 +      if (IS_IVYBRIDGE(dev))
 +              return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
  
 -      return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 +      return INTEL_RC6_ENABLE;
  }
  
  static void gen6_enable_rps_interrupts(struct drm_device *dev)
@@@ -3279,10 -3222,10 +3279,10 @@@ static void gen8_enable_rps(struct drm_
        /* 3: Enable RC6 */
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 -      DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
 +      intel_print_rc6_info(dev, rc6_mask);
        I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
 -                      GEN6_RC_CTL_EI_MODE(1) |
 -                      rc6_mask);
 +                                  GEN6_RC_CTL_EI_MODE(1) |
 +                                  rc6_mask);
  
        /* 4 Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
@@@ -3324,7 -3267,7 +3324,7 @@@ static void gen6_enable_rps(struct drm_
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
 -      u32 rp_state_cap;
 +      u32 rp_state_cap, hw_max, hw_min;
        u32 gt_perf_status;
        u32 rc6vids, pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
  
        /* In units of 50MHz */
 -      dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
 -      dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
 +      dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff;
 +      hw_min = (rp_state_cap >> 16) & 0xff;
        dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
        dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
        dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
        dev_priv->rps.cur_delay = 0;
  
 +      /* Preserve min/max settings in case of re-init */
 +      if (dev_priv->rps.max_delay == 0)
 +              dev_priv->rps.max_delay = hw_max;
 +
 +      if (dev_priv->rps.min_delay == 0)
 +              dev_priv->rps.min_delay = hw_min;
 +
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
  
@@@ -3557,6 -3493,8 +3557,8 @@@ static void valleyview_setup_pctx(struc
        u32 pcbr;
        int pctx_size = 24*1024;
  
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        pcbr = I915_READ(VLV_PCBR);
        if (pcbr) {
                /* BIOS set it up already, grab the pre-alloc'd space */
@@@ -3595,7 -3533,7 +3597,7 @@@ static void valleyview_enable_rps(struc
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
 -      u32 gtfifodbg, val, rc6_mode = 0;
 +      u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0;
        int i;
  
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
  
-       valleyview_setup_pctx(dev);
        /* If VLV, Forcewake all wells, else re-direct to regular path */
        gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
  
                         vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
                         dev_priv->rps.cur_delay);
  
 -      dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
 -      dev_priv->rps.hw_max = dev_priv->rps.max_delay;
 +      dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv);
        DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
 -                       dev_priv->rps.max_delay);
 +                       vlv_gpu_freq(dev_priv, hw_max),
 +                       hw_max);
  
        dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
        DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
  
 -      dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
 +      hw_min = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
 -                       vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
 -                       dev_priv->rps.min_delay);
 +                       vlv_gpu_freq(dev_priv, hw_min),
 +                       hw_min);
 +
 +      /* Preserve min/max settings in case of re-init */
 +      if (dev_priv->rps.max_delay == 0)
 +              dev_priv->rps.max_delay = hw_max;
 +
 +      if (dev_priv->rps.min_delay == 0)
 +              dev_priv->rps.min_delay = hw_min;
  
        DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
  
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
  
 +      dev_priv->rps.rp_up_masked = false;
 +      dev_priv->rps.rp_down_masked = false;
 +
        gen6_enable_rps_interrupts(dev);
  
        gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
@@@ -3698,13 -3625,13 +3698,13 @@@ void ironlake_teardown_rc6(struct drm_d
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (dev_priv->ips.renderctx) {
 -              i915_gem_object_unpin(dev_priv->ips.renderctx);
 +              i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
                drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
                dev_priv->ips.renderctx = NULL;
        }
  
        if (dev_priv->ips.pwrctx) {
 -              i915_gem_object_unpin(dev_priv->ips.pwrctx);
 +              i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
                drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
                dev_priv->ips.pwrctx = NULL;
        }
@@@ -3896,10 -3823,9 +3896,10 @@@ static unsigned long __i915_chipset_val
  
  unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
  {
 +      struct drm_device *dev = dev_priv->dev;
        unsigned long val;
  
 -      if (dev_priv->info->gen != 5)
 +      if (INTEL_INFO(dev)->gen != 5)
                return 0;
  
        spin_lock_irq(&mchdev_lock);
@@@ -3928,7 -3854,6 +3928,7 @@@ unsigned long i915_mch_val(struct drm_i
  
  static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
  {
 +      struct drm_device *dev = dev_priv->dev;
        static const struct v_table {
                u16 vd; /* in .1 mil */
                u16 vm; /* in .1 mil */
                { 16000, 14875, },
                { 16125, 15000, },
        };
 -      if (dev_priv->info->is_mobile)
 +      if (INTEL_INFO(dev)->is_mobile)
                return v_table[pxvid].vm;
        else
                return v_table[pxvid].vd;
@@@ -4105,9 -4030,7 +4105,9 @@@ static void __i915_update_gfx_val(struc
  
  void i915_update_gfx_val(struct drm_i915_private *dev_priv)
  {
 -      if (dev_priv->info->gen != 5)
 +      struct drm_device *dev = dev_priv->dev;
 +
 +      if (INTEL_INFO(dev)->gen != 5)
                return;
  
        spin_lock_irq(&mchdev_lock);
@@@ -4156,10 -4079,9 +4156,10 @@@ static unsigned long __i915_gfx_val(str
  
  unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
  {
 +      struct drm_device *dev = dev_priv->dev;
        unsigned long val;
  
 -      if (dev_priv->info->gen != 5)
 +      if (INTEL_INFO(dev)->gen != 5)
                return 0;
  
        spin_lock_irq(&mchdev_lock);
@@@ -4348,7 -4270,6 +4348,7 @@@ void intel_gpu_ips_teardown(void
        i915_mch_dev = NULL;
        spin_unlock_irq(&mchdev_lock);
  }
 +
  static void intel_init_emon(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -4474,6 -4395,8 +4474,8 @@@ void intel_enable_gt_powersave(struct d
                ironlake_enable_rc6(dev);
                intel_init_emon(dev);
        } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               if (IS_VALLEYVIEW(dev))
+                       valleyview_setup_pctx(dev);
                /*
                 * PCU communication is slow and this doesn't need to be
                 * done at any specific time, so do this out of our fast path
@@@ -4664,17 -4587,6 +4666,17 @@@ static void gen6_init_clock_gating(stru
                I915_WRITE(GEN6_GT_MODE,
                           _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
  
 +      /*
 +       * BSpec recommends 8x4 when MSAA is used,
 +       * however in practice 16x4 seems fastest.
 +       *
 +       * Note that PS/WM thread counts depend on the WIZ hashing
 +       * disable bit, which we don't touch here, but it's good
 +       * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 +       */
 +      I915_WRITE(GEN6_GT_MODE,
 +                 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +
        ilk_init_lp_watermarks(dev);
  
        I915_WRITE(CACHE_MODE_0,
         * According to the spec, bit 11 (RCCUNIT) must also be set,
         * but we didn't debug actual testcases to find it out.
         *
 -       * Also apply WaDisableVDSUnitClockGating:snb and
 -       * WaDisableRCPBUnitClockGating:snb.
 +       * WaDisableRCCUnitClockGating:snb
 +       * WaDisableRCPBUnitClockGating:snb
         */
        I915_WRITE(GEN6_UCGCTL2,
 -                 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
                   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
                   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
  
 -      /* Bspec says we need to always set all mask bits. */
 -      I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
 -                 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
 +      /* WaStripsFansDisableFastClipPerformanceFix:snb */
 +      I915_WRITE(_3D_CHICKEN3,
 +                 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
 +
 +      /*
 +       * Bspec says:
 +       * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
 +       * 3DSTATE_SF number of SF output attributes is more than 16."
 +       */
 +      I915_WRITE(_3D_CHICKEN3,
 +                 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
  
        /*
         * According to the spec the following bits should be
  
        g4x_disable_trickle_feed(dev);
  
 -      /* The default value should be 0x200 according to docs, but the two
 -       * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
 -      I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
 -      I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
 -
        cpt_init_clock_gating(dev);
  
        gen6_check_mch_setup(dev);
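Many of the clock-gating writes in this file (the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() calls, and GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4 above) target "masked" registers: the upper 16 bits select which of the lower 16 bits the write actually changes, so no read-modify-write is needed. A sketch of the helpers under that assumption (the real ones live in i915_reg.h):

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* touch bit(s) a, set them */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* touch bit(s) a, clear them */

This is also why these GT_MODE writes can be done blind: bits whose mask half is zero are left untouched by the hardware.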
@@@ -4747,17 -4657,14 +4749,17 @@@ static void gen7_setup_fixed_func_sched
  {
        uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
  
 +      /*
 +       * WaVSThreadDispatchOverride:ivb,vlv
 +       *
 +       * This actually overrides the dispatch
 +       * mode for all thread types.
 +       */
        reg &= ~GEN7_FF_SCHED_MASK;
        reg |= GEN7_FF_TS_SCHED_HW;
        reg |= GEN7_FF_VS_SCHED_HW;
        reg |= GEN7_FF_DS_SCHED_HW;
  
 -      if (IS_HASWELL(dev_priv->dev))
 -              reg &= ~GEN7_FF_VS_REF_CNT_FFME;
 -
        I915_WRITE(GEN7_FF_THREAD_MODE, reg);
  }
  
@@@ -4795,7 -4702,7 +4797,7 @@@ static void lpt_suspend_hw(struct drm_d
  static void gen8_init_clock_gating(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      enum pipe i;
 +      enum pipe pipe;
  
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        /* FIXME(BDW): Check all the w/a, some might only apply to
         * pre-production hw. */
  
 -      WARN(!i915_preliminary_hw_support,
 -           "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n");
 +      /* WaDisablePartialInstShootdown:bdw */
 +      I915_WRITE(GEN8_ROW_CHICKEN,
 +                 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
 +
 +      /* WaDisableThreadStallDopClockGating:bdw */
 +      /* FIXME: Unclear whether we really need this on production bdw. */
 +      I915_WRITE(GEN8_ROW_CHICKEN,
 +                 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
 +
 +      /*
 +       * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
 +       * pre-production hardware
 +       */
        I915_WRITE(HALF_SLICE_CHICKEN3,
                   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
        I915_WRITE(HALF_SLICE_CHICKEN3,
                   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
  
        /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
 -      for_each_pipe(i) {
 -              I915_WRITE(CHICKEN_PIPESL_1(i),
 -                         I915_READ(CHICKEN_PIPESL_1(i) |
 -                                   DPRS_MASK_VBLANK_SRD));
 +      for_each_pipe(pipe) {
 +              I915_WRITE(CHICKEN_PIPESL_1(pipe),
 +                         I915_READ(CHICKEN_PIPESL_1(pipe)) |
 +                         BDW_DPRS_MASK_VBLANK_SRD);
        }
  
        /* Use Force Non-Coherent whenever executing a 3D context. This is a
        I915_WRITE(GEN7_FF_THREAD_MODE,
                   I915_READ(GEN7_FF_THREAD_MODE) &
                   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 +
 +      /*
 +       * BSpec recommends 8x4 when MSAA is used,
 +       * however in practice 16x4 seems fastest.
 +       *
 +       * Note that PS/WM thread counts depend on the WIZ hashing
 +       * disable bit, which we don't touch here, but it's good
 +       * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 +       */
 +      I915_WRITE(GEN7_GT_MODE,
 +                 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +
 +      I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
 +                 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 +
 +      /* WaDisableSDEUnitClockGating:bdw */
 +      I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 +                 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
  }
  
  static void haswell_init_clock_gating(struct drm_device *dev)
  
        ilk_init_lp_watermarks(dev);
  
 -      /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
 -       * This implements the WaDisableRCZUnitClockGating:hsw workaround.
 -       */
 -      I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 -
 -      /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
 -      I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 -                 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 -
 -      /* WaApplyL3ControlAndL3ChickenMode:hsw */
 -      I915_WRITE(GEN7_L3CNTLREG1,
 -                      GEN7_WA_FOR_GEN7_L3_CONTROL);
 -      I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
 -                      GEN7_WA_L3_CHICKEN_MODE);
 -
        /* L3 caching of data atomics doesn't work -- disable it. */
        I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
        I915_WRITE(HSW_ROW_CHICKEN3,
                        GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
  
        /* WaVSRefCountFullforceMissDisable:hsw */
 -      gen7_setup_fixed_func_scheduler(dev_priv);
 +      I915_WRITE(GEN7_FF_THREAD_MODE,
 +                 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
 +
 +      /* enable HiZ Raw Stall Optimization */
 +      I915_WRITE(CACHE_MODE_0_GEN7,
 +                 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
  
        /* WaDisable4x2SubspanOptimization:hsw */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
  
 +      /*
 +       * BSpec recommends 8x4 when MSAA is used,
 +       * however in practice 16x4 seems fastest.
 +       *
 +       * Note that PS/WM thread counts depend on the WIZ hashing
 +       * disable bit, which we don't touch here, but it's good
 +       * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 +       */
 +      I915_WRITE(GEN7_GT_MODE,
 +                 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +
        /* WaSwitchSolVfFArbitrationPriority:hsw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
  
@@@ -4950,6 -4827,9 +4952,6 @@@ static void ivybridge_init_clock_gating
        if (IS_IVB_GT1(dev))
                I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 -      else
 -              I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
 -                         _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
  
        /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
        if (IS_IVB_GT1(dev))
                I915_WRITE(GEN7_ROW_CHICKEN2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 -      else
 +      else {
 +              /* must write both registers */
 +              I915_WRITE(GEN7_ROW_CHICKEN2,
 +                         _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
                I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 -
 +      }
  
        /* WaForceL3Serialization:ivb */
        I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
                   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
  
 -      /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 -       * gating disable must be set.  Failure to set it results in
 -       * flickering pixels due to Z write ordering failures after
 -       * some amount of runtime in the Mesa "fire" demo, and Unigine
 -       * Sanctuary and Tropics, and apparently anything else with
 -       * alpha test or pixel discard.
 -       *
 -       * According to the spec, bit 11 (RCCUNIT) must also be set,
 -       * but we didn't debug actual testcases to find it out.
 -       *
 +      /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:ivb workaround.
         */
        I915_WRITE(GEN6_UCGCTL2,
 -                 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
 -                 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 +                 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
  
        /* This is required by WaCatErrorRejectionIssue:ivb */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
  
        g4x_disable_trickle_feed(dev);
  
 -      /* WaVSRefCountFullforceMissDisable:ivb */
        gen7_setup_fixed_func_scheduler(dev_priv);
  
 +      if (0) { /* causes HiZ corruption on ivb:gt1 */
 +              /* enable HiZ Raw Stall Optimization */
 +              I915_WRITE(CACHE_MODE_0_GEN7,
 +                         _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
 +      }
 +
        /* WaDisable4x2SubspanOptimization:ivb */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
  
 +      /*
 +       * BSpec recommends 8x4 when MSAA is used,
 +       * however in practice 16x4 seems fastest.
 +       *
 +       * Note that PS/WM thread counts depend on the WIZ hashing
 +       * disable bit, which we don't touch here, but it's good
 +       * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 +       */
 +      I915_WRITE(GEN7_GT_MODE,
 +                 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 +
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= GEN6_MBC_SNPCR_MED;
@@@ -5058,12 -4929,19 +5060,12 @@@ static void valleyview_init_clock_gatin
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
  
 +      /* WaPsdDispatchEnable:vlv */
        /* WaDisablePSDDualDispatchEnable:vlv */
        I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
                   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
                                      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
  
 -      /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
 -      I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 -                 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 -
 -      /* WaApplyL3ControlAndL3ChickenMode:vlv */
 -      I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
 -      I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 -
        /* WaForceL3Serialization:vlv */
        I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
                   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
  
 -      /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
 -       * gating disable must be set.  Failure to set it results in
 -       * flickering pixels due to Z write ordering failures after
 -       * some amount of runtime in the Mesa "fire" demo, and Unigine
 -       * Sanctuary and Tropics, and apparently anything else with
 -       * alpha test or pixel discard.
 -       *
 -       * According to the spec, bit 11 (RCCUNIT) must also be set,
 -       * but we didn't debug actual testcases to find it out.
 -       *
 +      gen7_setup_fixed_func_scheduler(dev_priv);
 +
 +      /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:vlv workaround.
 -       *
 -       * Also apply WaDisableVDSUnitClockGating:vlv and
 -       * WaDisableRCPBUnitClockGating:vlv.
         */
        I915_WRITE(GEN6_UCGCTL2,
 -                 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
 -                 GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
 -                 GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
 -                 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
 -                 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 +                 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
  
 +      /* WaDisableL3Bank2xClockGate:vlv */
        I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
  
        I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
  
 +      /*
 +       * BSpec says this must be set, even though
 +       * WaDisable4x2SubspanOptimization isn't listed for VLV.
 +       */
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
  
 +      /*
 +       * WaIncreaseL3CreditsForVLVB0:vlv
 +       * This is the hardware default actually.
 +       */
 +      I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
 +
        /*
         * WaDisableVLVClockGating_VBIIssue:vlv
         * Disable clock gating on the GCFG unit to prevent a delay
         * in the reporting of vblank events.
         */
 -      I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
 -
 -      /* Conservative clock gating settings for now */
 -      I915_WRITE(0x9400, 0xffffffff);
 -      I915_WRITE(0x9404, 0xffffffff);
 -      I915_WRITE(0x9408, 0xffffffff);
 -      I915_WRITE(0x940c, 0xffffffff);
 -      I915_WRITE(0x9410, 0xffffffff);
 -      I915_WRITE(0x9414, 0xffffffff);
 -      I915_WRITE(0x9418, 0xffffffff);
 +      I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
  }
  
  static void g4x_init_clock_gating(struct drm_device *dev)
@@@ -5224,16 -5114,19 +5226,16 @@@ void intel_suspend_hw(struct drm_devic
   * enable it, so check if it's enabled and also check if we've requested it to
   * be enabled.
   */
 -static bool hsw_power_well_enabled(struct drm_device *dev,
 +static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
        return I915_READ(HSW_PWR_WELL_DRIVER) ==
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
  }
  
 -bool intel_display_power_enabled_sw(struct drm_device *dev,
 +bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
  
        power_domains = &dev_priv->power_domains;
        return power_domains->domain_use_count[domain];
  }
  
 -bool intel_display_power_enabled(struct drm_device *dev,
 +bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        bool is_enabled;
                if (power_well->always_on)
                        continue;
  
 -              if (!power_well->is_enabled(dev, power_well)) {
 +              if (!power_well->ops->is_enabled(dev_priv, power_well)) {
                        is_enabled = false;
                        break;
                }
        return is_enabled;
  }
  
 +/*
 + * Starting with Haswell, we have a "Power Down Well" that can be turned off
 + * when not needed anymore. We have 4 registers that can request the power well
 + * to be enabled, and it will only be disabled if none of the registers is
 + * requesting it to be enabled.
 + */
  static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = dev_priv->dev;
        }
  }
  
 +static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
 +{
 +      assert_spin_locked(&dev->vbl_lock);
 +
 +      dev->vblank[pipe].last = 0;
 +}
 +
  static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = dev_priv->dev;
 -      enum pipe p;
 +      enum pipe pipe;
        unsigned long irqflags;
  
        /*
         * FIXME: Should we do this in general in drm_vblank_post_modeset?
         */
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
 -      for_each_pipe(p)
 -              if (p != PIPE_A)
 -                      dev->vblank[p].last = 0;
 +      for_each_pipe(pipe)
 +              if (pipe != PIPE_A)
 +                      reset_vblank_counter(dev, pipe);
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
  }
  
 -static void hsw_set_power_well(struct drm_device *dev,
 +static void hsw_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        bool is_enabled, enable_requested;
        uint32_t tmp;
  
        }
  }
  
 -static void __intel_power_well_get(struct drm_device *dev,
 +static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
  
 -      if (!power_well->count++ && power_well->set) {
 -              hsw_disable_package_c8(dev_priv);
 -              power_well->set(dev, power_well, true);
 -      }
 +      /*
 +       * We're taking over the BIOS, so clear any requests made by it since
 +       * the driver is in charge now.
 +       */
 +      if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
 +              I915_WRITE(HSW_PWR_WELL_BIOS, 0);
 +}
 +
 +static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 +                                struct i915_power_well *power_well)
 +{
 +      hsw_disable_package_c8(dev_priv);
 +      hsw_set_power_well(dev_priv, power_well, true);
  }
  
 -static void __intel_power_well_put(struct drm_device *dev,
 +static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      hsw_set_power_well(dev_priv, power_well, false);
 +      hsw_enable_package_c8(dev_priv);
 +}
 +
 +static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
 +                                         struct i915_power_well *power_well)
 +{
 +}
 +
 +static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 +                                           struct i915_power_well *power_well)
 +{
 +      return true;
 +}
 +
 +static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 +                             struct i915_power_well *power_well, bool enable)
 +{
 +      enum punit_power_well power_well_id = power_well->data;
 +      u32 mask;
 +      u32 state;
 +      u32 ctrl;
 +
 +      mask = PUNIT_PWRGT_MASK(power_well_id);
 +      state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
 +                       PUNIT_PWRGT_PWR_GATE(power_well_id);
 +
 +      mutex_lock(&dev_priv->rps.hw_lock);
 +
 +#define COND \
 +      ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
 +
 +      if (COND)
 +              goto out;
 +
 +      ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
 +      ctrl &= ~mask;
 +      ctrl |= state;
 +      vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 +
 +      if (wait_for(COND, 100))
 +              DRM_ERROR("timeout setting power well state %08x (%08x)\n",
 +                        state,
 +                        vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 +
 +#undef COND
 +
 +out:
 +      mutex_unlock(&dev_priv->rps.hw_lock);
 +}
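vlv_set_power_well() defines a local COND macro so the exact same condition feeds both the fast-path check and the timed poll, then #undefs it to keep the name scoped. The semantics assumed here for i915's wait_for() helper: it re-evaluates the condition until it becomes true or the timeout in milliseconds expires, returning 0 on success and non-zero (-ETIMEDOUT) on timeout — which is why a non-zero return is reported as an error. A reduced sketch of such a helper (shape assumed, not the authoritative intel_drv.h version):

	#define wait_for(COND, MS) ({					\
		unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
		int ret__ = 0;						\
		while (!(COND)) {					\
			if (time_after(jiffies, timeout__)) {		\
				ret__ = -ETIMEDOUT;			\
				break;					\
			}						\
			msleep(1);					\
		}							\
		ret__;							\
	})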
 +
 +static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
 +                                 struct i915_power_well *power_well)
 +{
 +      vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
 +}
 +
 +static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
 +                                struct i915_power_well *power_well)
 +{
 +      vlv_set_power_well(dev_priv, power_well, true);
 +}
 +
 +static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
 +                                 struct i915_power_well *power_well)
 +{
 +      vlv_set_power_well(dev_priv, power_well, false);
 +}
 +
 +static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 +                                 struct i915_power_well *power_well)
 +{
 +      int power_well_id = power_well->data;
 +      bool enabled = false;
 +      u32 mask;
 +      u32 state;
 +      u32 ctrl;
 +
 +      mask = PUNIT_PWRGT_MASK(power_well_id);
 +      ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
 +
 +      mutex_lock(&dev_priv->rps.hw_lock);
 +
 +      state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
 +      /*
 +       * We only ever set the power-on and power-gate states, anything
 +       * else is unexpected.
 +       */
 +      WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
 +              state != PUNIT_PWRGT_PWR_GATE(power_well_id));
 +      if (state == ctrl)
 +              enabled = true;
 +
 +      /*
 +       * A transient state at this point would mean some unexpected party
 +       * is poking at the power controls too.
 +       */
 +      ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
 +      WARN_ON(ctrl != state);
 +
 +      mutex_unlock(&dev_priv->rps.hw_lock);
 +
 +      return enabled;
 +}
 +
 +static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 +                                        struct i915_power_well *power_well)
 +{
 +      WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 +
 +      vlv_set_power_well(dev_priv, power_well, true);
 +
 +      spin_lock_irq(&dev_priv->irq_lock);
 +      valleyview_enable_display_irqs(dev_priv);
 +      spin_unlock_irq(&dev_priv->irq_lock);
 +
 +      /*
 +       * During driver initialization we need to defer enabling hotplug
 +       * processing until fbdev is set up.
 +       */
 +      if (dev_priv->enable_hotplug_processing)
 +              intel_hpd_init(dev_priv->dev);
 +
 +      i915_redisable_vga_power_on(dev_priv->dev);
 +}
 +
 +static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 +                                         struct i915_power_well *power_well)
 +{
 +      struct drm_device *dev = dev_priv->dev;
 +      enum pipe pipe;
 +
 +      WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 +
 +      spin_lock_irq(&dev_priv->irq_lock);
 +      for_each_pipe(pipe)
 +              __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
  
 -      WARN_ON(!power_well->count);
 +      valleyview_disable_display_irqs(dev_priv);
 +      spin_unlock_irq(&dev_priv->irq_lock);
 +
 +      spin_lock_irq(&dev->vbl_lock);
 +      for_each_pipe(pipe)
 +              reset_vblank_counter(dev, pipe);
 +      spin_unlock_irq(&dev->vbl_lock);
 +
 +      vlv_set_power_well(dev_priv, power_well, false);
 +}
 +
 +static void check_power_well_state(struct drm_i915_private *dev_priv,
 +                                 struct i915_power_well *power_well)
 +{
 +      bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
 +
 +      if (power_well->always_on || !i915.disable_power_well) {
 +              if (!enabled)
 +                      goto mismatch;
  
 -      if (!--power_well->count && power_well->set &&
 -          i915_disable_power_well) {
 -              power_well->set(dev, power_well, false);
 -              hsw_enable_package_c8(dev_priv);
 +              return;
        }
 +
 +      if (enabled != (power_well->count > 0))
 +              goto mismatch;
 +
 +      return;
 +
 +mismatch:
 +      WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
 +                power_well->name, power_well->always_on, enabled,
 +                power_well->count, i915.disable_power_well);
  }
  
 -void intel_display_power_get(struct drm_device *dev,
 +void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        int i;
  
        mutex_lock(&power_domains->lock);
  
 -      for_each_power_well(i, power_well, BIT(domain), power_domains)
 -              __intel_power_well_get(dev, power_well);
 +      for_each_power_well(i, power_well, BIT(domain), power_domains) {
 +              if (!power_well->count++) {
 +                      DRM_DEBUG_KMS("enabling %s\n", power_well->name);
 +                      power_well->ops->enable(dev_priv, power_well);
 +              }
 +
 +              check_power_well_state(dev_priv, power_well);
 +      }
  
        power_domains->domain_use_count[domain]++;
  
        mutex_unlock(&power_domains->lock);
  }
  
 -void intel_display_power_put(struct drm_device *dev,
 +void intel_display_power_put(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
        int i;
        WARN_ON(!power_domains->domain_use_count[domain]);
        power_domains->domain_use_count[domain]--;
  
 -      for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
 -              __intel_power_well_put(dev, power_well);
 +      for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
 +              WARN_ON(!power_well->count);
 +
 +              if (!--power_well->count && i915.disable_power_well) {
 +                      DRM_DEBUG_KMS("disabling %s\n", power_well->name);
 +                      power_well->ops->disable(dev_priv, power_well);
 +              }
 +
 +              check_power_well_state(dev_priv, power_well);
 +      }
  
        mutex_unlock(&power_domains->lock);
  }
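
The get/put pair above is a plain per-well refcount: the first get on a well
powers it up, the last put powers it down (when i915.disable_power_well
allows), and check_power_well_state() cross-checks the hardware state against
the count. A minimal caller-side sketch of the intended pairing, mirroring the
i915_request_power_well()/i915_release_power_well() usage below; the helper
name and the reg parameter are illustrative, not part of this merge:

	/* Sketch: hold the audio domain across a register access. */
	static u32 audio_reg_read_sketch(struct drm_i915_private *dev_priv,
					 u32 reg)
	{
		u32 val;

		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
		val = I915_READ(reg);	/* well is guaranteed powered here */
		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);

		return val;
	}
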
@@@ -5633,7 -5333,7 +5635,7 @@@ void i915_request_power_well(void
  
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
 -      intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
 +      intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
  }
  EXPORT_SYMBOL_GPL(i915_request_power_well);
  
@@@ -5647,99 -5347,29 +5649,99 @@@ void i915_release_power_well(void
  
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
 -      intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
 +      intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
  }
  EXPORT_SYMBOL_GPL(i915_release_power_well);
  
 +#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 +
 +#define HSW_ALWAYS_ON_POWER_DOMAINS (                 \
 +      BIT(POWER_DOMAIN_PIPE_A) |                      \
 +      BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
 +      BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
 +      BIT(POWER_DOMAIN_PORT_CRT) |                    \
 +      BIT(POWER_DOMAIN_INIT))
 +#define HSW_DISPLAY_POWER_DOMAINS (                           \
 +      (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
 +      BIT(POWER_DOMAIN_INIT))
 +
 +#define BDW_ALWAYS_ON_POWER_DOMAINS (                 \
 +      HSW_ALWAYS_ON_POWER_DOMAINS |                   \
 +      BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
 +#define BDW_DISPLAY_POWER_DOMAINS (                           \
 +      (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |    \
 +      BIT(POWER_DOMAIN_INIT))
 +
 +#define VLV_ALWAYS_ON_POWER_DOMAINS   BIT(POWER_DOMAIN_INIT)
 +#define VLV_DISPLAY_POWER_DOMAINS     POWER_DOMAIN_MASK
 +
 +#define VLV_DPIO_CMN_BC_POWER_DOMAINS (               \
 +      BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
 +      BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
 +      BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
 +      BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
 +      BIT(POWER_DOMAIN_PORT_CRT) |            \
 +      BIT(POWER_DOMAIN_INIT))
 +
 +#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (        \
 +      BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |  \
 +      BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
 +      BIT(POWER_DOMAIN_INIT))
 +
 +#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (        \
 +      BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |  \
 +      BIT(POWER_DOMAIN_INIT))
 +
 +#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (        \
 +      BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |  \
 +      BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
 +      BIT(POWER_DOMAIN_INIT))
 +
 +#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (        \
 +      BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |  \
 +      BIT(POWER_DOMAIN_INIT))
 +
 +static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
 +      .sync_hw = i9xx_always_on_power_well_noop,
 +      .enable = i9xx_always_on_power_well_noop,
 +      .disable = i9xx_always_on_power_well_noop,
 +      .is_enabled = i9xx_always_on_power_well_enabled,
 +};
 +
  static struct i915_power_well i9xx_always_on_power_well[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = POWER_DOMAIN_MASK,
 +              .ops = &i9xx_always_on_power_well_ops,
        },
  };
  
 +static const struct i915_power_well_ops hsw_power_well_ops = {
 +      .sync_hw = hsw_power_well_sync_hw,
 +      .enable = hsw_power_well_enable,
 +      .disable = hsw_power_well_disable,
 +      .is_enabled = hsw_power_well_enabled,
 +};
 +
  static struct i915_power_well hsw_power_wells[] = {
        {
                .name = "always-on",
                .always_on = 1,
                .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
 +              .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
 -              .domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
 -              .is_enabled = hsw_power_well_enabled,
 -              .set = hsw_set_power_well,
 +              .domains = HSW_DISPLAY_POWER_DOMAINS,
 +              .ops = &hsw_power_well_ops,
        },
  };
  
@@@ -5748,83 -5378,12 +5750,83 @@@ static struct i915_power_well bdw_power
                .name = "always-on",
                .always_on = 1,
                .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
 +              .ops = &i9xx_always_on_power_well_ops,
        },
        {
                .name = "display",
 -              .domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
 -              .is_enabled = hsw_power_well_enabled,
 -              .set = hsw_set_power_well,
 +              .domains = BDW_DISPLAY_POWER_DOMAINS,
 +              .ops = &hsw_power_well_ops,
 +      },
 +};
 +
 +static const struct i915_power_well_ops vlv_display_power_well_ops = {
 +      .sync_hw = vlv_power_well_sync_hw,
 +      .enable = vlv_display_power_well_enable,
 +      .disable = vlv_display_power_well_disable,
 +      .is_enabled = vlv_power_well_enabled,
 +};
 +
 +static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
 +      .sync_hw = vlv_power_well_sync_hw,
 +      .enable = vlv_power_well_enable,
 +      .disable = vlv_power_well_disable,
 +      .is_enabled = vlv_power_well_enabled,
 +};
 +
 +static struct i915_power_well vlv_power_wells[] = {
 +      {
 +              .name = "always-on",
 +              .always_on = 1,
 +              .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
 +              .ops = &i9xx_always_on_power_well_ops,
 +      },
 +      {
 +              .name = "display",
 +              .domains = VLV_DISPLAY_POWER_DOMAINS,
 +              .data = PUNIT_POWER_WELL_DISP2D,
 +              .ops = &vlv_display_power_well_ops,
 +      },
 +      {
 +              .name = "dpio-common",
 +              .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
 +              .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
 +              .ops = &vlv_dpio_power_well_ops,
 +      },
 +      {
 +              .name = "dpio-tx-b-01",
 +              .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 +              .ops = &vlv_dpio_power_well_ops,
 +              .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
 +      },
 +      {
 +              .name = "dpio-tx-b-23",
 +              .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 +              .ops = &vlv_dpio_power_well_ops,
 +              .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
 +      },
 +      {
 +              .name = "dpio-tx-c-01",
 +              .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 +              .ops = &vlv_dpio_power_well_ops,
 +              .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
 +      },
 +      {
 +              .name = "dpio-tx-c-23",
 +              .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
 +                         VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
 +              .ops = &vlv_dpio_power_well_ops,
 +              .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
        },
  };
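
With the ops vtables above, each platform's power wells reduce to a data
table; a hypothetical additional platform would only add another array in the
same style (the "foo" names below are invented for illustration):

	/* Hypothetical table: one always-on well plus one gated well. */
	static struct i915_power_well foo_power_wells[] = {
		{
			.name = "always-on",
			.always_on = 1,
			.domains = POWER_DOMAIN_MASK,
			.ops = &i9xx_always_on_power_well_ops,
		},
		{
			.name = "display",
			.domains = POWER_DOMAIN_MASK,	/* gate everything */
			.ops = &hsw_power_well_ops,	/* reuse existing ops */
		},
	};

intel_power_domains_init() would then select it with
set_power_wells(power_domains, foo_power_wells), exactly as done for the
existing platforms below.
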
  
        (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
  })
  
 -int intel_power_domains_init(struct drm_device *dev)
 +int intel_power_domains_init(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
  
        mutex_init(&power_domains->lock);
         * The enabling order will be from lower to higher indexed wells,
         * the disabling order is reversed.
         */
 -      if (IS_HASWELL(dev)) {
 +      if (IS_HASWELL(dev_priv->dev)) {
                set_power_wells(power_domains, hsw_power_wells);
                hsw_pwr = power_domains;
 -      } else if (IS_BROADWELL(dev)) {
 +      } else if (IS_BROADWELL(dev_priv->dev)) {
                set_power_wells(power_domains, bdw_power_wells);
                hsw_pwr = power_domains;
 +      } else if (IS_VALLEYVIEW(dev_priv->dev)) {
 +              set_power_wells(power_domains, vlv_power_wells);
        } else {
                set_power_wells(power_domains, i9xx_always_on_power_well);
        }
        return 0;
  }
  
 -void intel_power_domains_remove(struct drm_device *dev)
 +void intel_power_domains_remove(struct drm_i915_private *dev_priv)
  {
        hsw_pwr = NULL;
  }
  
 -static void intel_power_domains_resume(struct drm_device *dev)
 +static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        struct i915_power_well *power_well;
        int i;
  
        mutex_lock(&power_domains->lock);
 -      for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
 -              if (power_well->set)
 -                      power_well->set(dev, power_well, power_well->count > 0);
 -      }
 +      for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
 +              power_well->ops->sync_hw(dev_priv, power_well);
        mutex_unlock(&power_domains->lock);
  }
  
 -/*
 - * Starting with Haswell, we have a "Power Down Well" that can be turned off
 - * when not needed anymore. We have 4 registers that can request the power well
 - * to be enabled, and it will only be disabled if none of the registers is
 - * requesting it to be enabled.
 - */
 -void intel_power_domains_init_hw(struct drm_device *dev)
 +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
        /* For now, we need the power well to be always enabled. */
 -      intel_display_set_init_power(dev, true);
 -      intel_power_domains_resume(dev);
 -
 -      if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
 -              return;
 -
 -      /* We're taking over the BIOS, so clear any requests made by it since
 -       * the driver is in charge now. */
 -      if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
 -              I915_WRITE(HSW_PWR_WELL_BIOS, 0);
 +      intel_display_set_init_power(dev_priv, true);
 +      intel_power_domains_resume(dev_priv);
  }
  
  /* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
@@@ -6156,9 -5733,10 +6158,9 @@@ void intel_pm_setup(struct drm_device *
  
        mutex_init(&dev_priv->pc8.lock);
        dev_priv->pc8.requirements_met = false;
 -      dev_priv->pc8.gpu_idle = false;
        dev_priv->pc8.irqs_disabled = false;
        dev_priv->pc8.enabled = false;
 -      dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
 +      dev_priv->pc8.disable_count = 1; /* requirements_met */
        INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);
@@@ -531,11 -531,9 +531,11 @@@ init_pipe_control(struct intel_ring_buf
                goto err;
        }
  
 -      i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 +      ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 +      if (ret)
 +              goto err_unref;
  
 -      ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
 +      ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
        if (ret)
                goto err_unref;
  
        return 0;
  
  err_unpin:
 -      i915_gem_object_unpin(ring->scratch.obj);
 +      i915_gem_object_ggtt_unpin(ring->scratch.obj);
  err_unref:
        drm_gem_object_unreference(&ring->scratch.obj->base);
  err:
@@@ -571,7 -569,7 +571,7 @@@ static int init_render_ring(struct inte
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
 -       * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 +       * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
         */
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@@ -627,7 -625,7 +627,7 @@@ static void render_ring_cleanup(struct 
  
        if (INTEL_INFO(dev)->gen >= 5) {
                kunmap(sg_page(ring->scratch.obj->pages->sgl));
 -              i915_gem_object_unpin(ring->scratch.obj);
 +              i915_gem_object_ggtt_unpin(ring->scratch.obj);
        }
  
        drm_gem_object_unreference(&ring->scratch.obj->base);
@@@ -1255,7 -1253,7 +1255,7 @@@ static void cleanup_status_page(struct 
                return;
  
        kunmap(sg_page(obj->pages->sgl));
 -      i915_gem_object_unpin(obj);
 +      i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
  }
@@@ -1273,13 -1271,12 +1273,13 @@@ static int init_status_page(struct inte
                goto err;
        }
  
 -      i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 +      ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 +      if (ret)
 +              goto err_unref;
  
 -      ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 -      if (ret != 0) {
 +      ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
 +      if (ret)
                goto err_unref;
 -      }
  
        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
        return 0;
  
  err_unpin:
 -      i915_gem_object_unpin(obj);
 +      i915_gem_object_ggtt_unpin(obj);
  err_unref:
        drm_gem_object_unreference(&obj->base);
  err:
@@@ -1359,7 -1356,7 +1359,7 @@@ static int intel_init_ring_buffer(struc
  
        ring->obj = obj;
  
 -      ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
 +      ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
        if (ret)
                goto err_unref;
  
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;
  
 +      i915_cmd_parser_init_ring(ring);
 +
        return 0;
  
  err_unmap:
        iounmap(ring->virtual_start);
  err_unpin:
 -      i915_gem_object_unpin(obj);
 +      i915_gem_object_ggtt_unpin(obj);
  err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
@@@ -1423,7 -1418,7 +1423,7 @@@ void intel_cleanup_ring_buffer(struct i
  
        iounmap(ring->virtual_start);
  
 -      i915_gem_object_unpin(ring->obj);
 +      i915_gem_object_ggtt_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;
        ring->preallocated_lazy_request = NULL;
        cleanup_status_page(ring);
  }
  
 -static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 -{
 -      int ret;
 -
 -      ret = i915_wait_seqno(ring, seqno);
 -      if (!ret)
 -              i915_gem_retire_requests_ring(ring);
 -
 -      return ret;
 -}
 -
  static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
  {
        struct drm_i915_gem_request *request;
 -      u32 seqno = 0;
 +      u32 seqno = 0, tail;
        int ret;
  
 -      i915_gem_retire_requests_ring(ring);
 -
        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
 +
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
                        space += ring->size;
                if (space >= n) {
                        seqno = request->seqno;
 +                      tail = request->tail;
                        break;
                }
  
        if (seqno == 0)
                return -ENOSPC;
  
 -      ret = intel_ring_wait_seqno(ring, seqno);
 +      ret = i915_wait_seqno(ring, seqno);
        if (ret)
                return ret;
  
 -      if (WARN_ON(ring->last_retired_head == -1))
 -              return -ENOSPC;
 -
 -      ring->head = ring->last_retired_head;
 -      ring->last_retired_head = -1;
 +      ring->head = tail;
        ring->space = ring_space(ring);
        if (WARN_ON(ring->space < n))
                return -ENOSPC;
@@@ -1518,8 -1528,7 +1518,8 @@@ static int ring_wait_for_space(struct i
                        return 0;
                }
  
 -              if (dev->primary->master) {
 +              if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
 +                  dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
@@@ -1644,6 -1653,27 +1644,27 @@@ int intel_ring_begin(struct intel_ring_
        return 0;
  }
  
+ /* Align the ring tail to a cacheline boundary */
+ int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+ {
+       /* Mask so that an already-aligned tail needs no padding at all. */
+       int num_dwords = ((64 - (ring->tail & 63)) & 63) / sizeof(uint32_t);
+       int ret;
+ 
+       if (num_dwords == 0)
+               return 0;
+ 
+       ret = intel_ring_begin(ring, num_dwords);
+       if (ret)
+               return ret;
+ 
+       while (num_dwords--)
+               intel_ring_emit(ring, MI_NOOP);
+ 
+       intel_ring_advance(ring);
+ 
+       return 0;
+ }
+ 
  void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
  {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
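
intel_ring_cacheline_align() above is meant to be called right before a short
command burst that must not straddle a 64-byte cacheline; a sketch of the
intended call pattern, assuming the ring does not wrap between the two calls
(the four-NOOP payload is a placeholder, not from this merge):

	ret = intel_ring_cacheline_align(ring);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 4);	/* up to 16 dwords per cacheline */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);		/* placeholder payload */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
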
@@@ -1924,7 -1954,7 +1945,7 @@@ int intel_init_render_ring_buffer(struc
                        return -ENOMEM;
                }
  
 -              ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 +              ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
                if (ret != 0) {
                        drm_gem_object_unreference(&obj->base);
                        DRM_ERROR("Failed to ping batch bo\n");
@@@ -41,8 -41,6 +41,8 @@@ enum intel_ring_hangcheck_action 
        HANGCHECK_HUNG,
  };
  
 +#define HANGCHECK_SCORE_RING_HUNG 31
 +
  struct intel_ring_hangcheck {
        bool deadlock;
        u32 seqno;
@@@ -164,38 -162,6 +164,38 @@@ struct  intel_ring_buffer 
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;
 +
 +      /*
 +       * Tables of commands the command parser needs to know about
 +       * for this ring.
 +       */
 +      const struct drm_i915_cmd_table *cmd_tables;
 +      int cmd_table_count;
 +
 +      /*
 +       * Table of registers allowed in commands that read/write registers.
 +       */
 +      const u32 *reg_table;
 +      int reg_count;
 +
 +      /*
 +       * Table of registers allowed in commands that read/write registers, but
 +       * only from the DRM master.
 +       */
 +      const u32 *master_reg_table;
 +      int master_reg_count;
 +
 +      /*
 +       * Returns the bitmask for the length field of the specified command.
 +       * Return 0 for an unrecognized/invalid command.
 +       *
 +       * If the command parser finds an entry for a command in the ring's
 +       * cmd_tables, it gets the command's length based on the table entry.
 +       * If not, it calls this function to determine the per-ring length field
 +       * encoding for the command (i.e. certain opcode ranges use certain bits
 +       * to encode the command length in the header).
 +       */
 +      u32 (*get_cmd_length_mask)(u32 cmd_header);
  };
  
  static inline bool
@@@ -267,6 -233,7 +267,7 @@@ intel_write_status_page(struct intel_ri
  void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
  
  int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+ int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
  static inline void intel_ring_emit(struct intel_ring_buffer *ring,
                                   u32 data)
  {
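
The get_cmd_length_mask() hook documented in the struct above supplies the
per-ring fallback when a command has no cmd_tables entry; a hypothetical
callback for illustration (the client-field position and the 0x3f/0xff masks
are assumptions, not taken from this merge):

	static u32 sketch_get_cmd_length_mask(u32 cmd_header)
	{
		u32 client = cmd_header >> 29;	/* assume client in bits 31:29 */

		if (client == 0x0)		/* MI commands */
			return 0x3f;		/* length in bits 5:0 */
		if (client == 0x3)		/* render commands */
			return 0xff;		/* length in bits 7:0 */

		return 0;			/* unrecognized: parser rejects */
	}
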
diff --combined include/drm/drm_crtc.h
@@@ -907,6 -907,9 +907,9 @@@ struct drm_mode_config 
  
        /* whether async page flip is supported or not */
        bool async_page_flip;
+       /* cursor size */
+       uint32_t cursor_width, cursor_height;
  };
  
  #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@@ -991,7 -994,6 +994,7 @@@ extern void drm_encoder_cleanup(struct 
  
  extern const char *drm_get_connector_name(const struct drm_connector *connector);
  extern const char *drm_get_connector_status_name(enum drm_connector_status status);
 +extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
  extern const char *drm_get_dpms_name(int val);
  extern const char *drm_get_dvi_i_subconnector_name(int val);
  extern const char *drm_get_dvi_i_select_name(int val);