drm/i915: s/hotplug_irq_storm_detect/intel_hpd_irq_handler/
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0aa2ef0..378ade0 100644
@@ -70,15 +70,6 @@ static const u32 hpd_status_gen4[] = {
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
 
-static const u32 hpd_status_i965[] = {
-        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
-        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
-        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
-        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
-        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
-        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
-};
-
 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
@@ -95,6 +86,8 @@ static void i915_hpd_irq_setup(struct drm_device *dev);
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -105,6 +98,8 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 static void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -112,6 +107,215 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }
 
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc;
+       enum pipe pipe;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       for_each_pipe(pipe) {
+               crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+               if (crtc->cpu_fifo_underrun_disabled)
+                       return false;
+       }
+
+       return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe;
+       struct intel_crtc *crtc;
+
+       for_each_pipe(pipe) {
+               crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+               if (crtc->pch_fifo_underrun_disabled)
+                       return false;
+       }
+
+       return true;
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+                                         DE_PIPEB_FIFO_UNDERRUN;
+
+       if (enable)
+               ironlake_enable_display_irq(dev_priv, bit);
+       else
+               ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+                                                 bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (enable) {
+               if (!ivb_can_enable_err_int(dev))
+                       return;
+
+               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
+                                        ERR_INT_FIFO_UNDERRUN_B |
+                                        ERR_INT_FIFO_UNDERRUN_C);
+
+               ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+       } else {
+               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+       }
+}
+
+static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+                                           bool enable)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
+                                               SDE_TRANSB_FIFO_UNDER;
+
+       if (enable)
+               I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+       else
+               I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
+
+       POSTING_READ(SDEIMR);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+                                           enum transcoder pch_transcoder,
+                                           bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (enable) {
+               if (!cpt_can_enable_serr_int(dev))
+                       return;
+
+               I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
+                                    SERR_INT_TRANS_B_FIFO_UNDERRUN |
+                                    SERR_INT_TRANS_C_FIFO_UNDERRUN);
+
+               I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+       } else {
+               I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
+       }
+
+       POSTING_READ(SDEIMR);
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pipe: pipe
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function makes us disable or enable CPU fifo underruns for a specific
+ * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
+ * reporting for one pipe may also disable all the other CPU error interrupts for
+ * the other pipes, due to the fact that there's just one interrupt mask/enable
+ * bit for all the pipes.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                          enum pipe pipe, bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+       ret = !intel_crtc->cpu_fifo_underrun_disabled;
+
+       if (enable == ret)
+               goto done;
+
+       intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+       if (IS_GEN5(dev) || IS_GEN6(dev))
+               ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+       else if (IS_GEN7(dev))
+               ivybridge_set_fifo_underrun_reporting(dev, enable);
+
+done:
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       return ret;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function makes us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, due to the fact that there's just
+ * one interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+                                          enum transcoder pch_transcoder,
+                                          bool enable)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe p;
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       unsigned long flags;
+       bool ret;
+
+       if (HAS_PCH_LPT(dev)) {
+               crtc = NULL;
+               for_each_pipe(p) {
+                       struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
+                       if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
+                               crtc = c;
+                               break;
+                       }
+               }
+               if (!crtc) {
+                       DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
+                       return false;
+               }
+       } else {
+               crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+       }
+       intel_crtc = to_intel_crtc(crtc);
+
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+       ret = !intel_crtc->pch_fifo_underrun_disabled;
+
+       if (enable == ret)
+               goto done;
+
+       intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+       if (HAS_PCH_IBX(dev))
+               ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+       else
+               cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
+
+done:
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       return ret;
+}
+
+
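
For context, a minimal sketch of how a caller is expected to use these two
helpers: because they return the previous reporting state, suppression can be
nested with a simple save/restore. (example_quiesce_pipe and the modeset step
are hypothetical, for illustration only.)

	static void example_quiesce_pipe(struct drm_device *dev, enum pipe pipe)
	{
		bool was_enabled;

		/* Suppress reporting across an operation that is expected
		 * to underrun; the return value is the previous state. */
		was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

		/* ... reconfigure the pipe here ... */

		/* Restore whatever state we found. */
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
	}
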
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
@@ -142,28 +346,21 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 }
 
 /**
- * intel_enable_asle - enable ASLE interrupt for OpRegion
+ * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  */
-void intel_enable_asle(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;
 
-       /* FIXME: opregion/asle for VLV */
-       if (IS_VALLEYVIEW(dev))
+       if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-       if (HAS_PCH_SPLIT(dev))
-               ironlake_enable_display_irq(dev_priv, DE_GSE);
-       else {
-               i915_enable_pipestat(dev_priv, 1,
-                                    PIPE_LEGACY_BLC_EVENT_ENABLE);
-               if (INTEL_INFO(dev)->gen >= 4)
-                       i915_enable_pipestat(dev_priv, 0,
-                                            PIPE_LEGACY_BLC_EVENT_ENABLE);
-       }
+       i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+       if (INTEL_INFO(dev)->gen >= 4)
+               i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
 
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -181,10 +378,16 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-                                                                     pipe);
 
-       return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /* Locking is horribly broken here, but whatever. */
+               struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+               return intel_crtc->active;
+       } else {
+               return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+       }
 }
 
 /* Called from drm generic code, passed a 'crtc', which
@@ -334,6 +537,21 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                                     crtc);
 }
 
+static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+{
+       enum drm_connector_status old_status;
+
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+       old_status = connector->status;
+
+       connector->status = connector->funcs->detect(connector, false);
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+                     connector->base.id,
+                     drm_get_connector_name(connector),
+                     old_status, connector->status);
+       return (old_status != connector->status);
+}
+
 /*
  * Handle hotplug events outside the interrupt handler proper.
  */
@@ -350,6 +568,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
+       bool changed = false;
+       u32 hpd_event_bits;
 
        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
@@ -359,6 +579,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
        DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+       hpd_event_bits = dev_priv->hpd_event_bits;
+       dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
@@ -373,6 +596,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
+               if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+                       DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+                                     drm_get_connector_name(connector), intel_encoder->hpd_pin);
+               }
        }
         /* if there were no outputs to poll, poll was disabled,
          * therefore make sure it's enabled when disabling HPD on
@@ -385,14 +612,20 @@ static void i915_hotplug_work_func(struct work_struct *work)
 
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
-       list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
-               if (intel_encoder->hot_plug)
-                       intel_encoder->hot_plug(intel_encoder);
-
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               intel_connector = to_intel_connector(connector);
+               intel_encoder = intel_connector->encoder;
+               if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+                       if (intel_encoder->hot_plug)
+                               intel_encoder->hot_plug(intel_encoder);
+                       if (intel_hpd_irq_event(dev, connector))
+                               changed = true;
+               }
+       }
        mutex_unlock(&mode_config->mutex);
 
-       /* Just fire off a uevent and let userspace tell us what to do */
-       drm_helper_hpd_irq_event(dev);
+       if (changed)
+               drm_kms_helper_hotplug_event(dev);
 }
 
 static void ironlake_handle_rps_change(struct drm_device *dev)
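
Taken together with the intel_hpd_irq_handler() changes further down, the
hotplug rework changes the flow for a single hotplug interrupt as follows (a
simplified sketch; only connectors whose pin actually fired are re-detected,
and a uevent is sent only on a real status change):

	/*
	 * irq: intel_hpd_irq_handler()
	 *	dev_priv->hpd_event_bits |= (1 << pin);	// under irq_lock
	 *	queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	 *
	 * worker: i915_hotplug_work_func()
	 *	snapshot and clear hpd_event_bits under irq_lock;
	 *	for each connector whose pin is set:
	 *		intel_encoder->hot_plug(intel_encoder);
	 *		changed |= intel_hpd_irq_event(dev, connector); // ->detect()
	 *	if (changed)
	 *		drm_kms_helper_hotplug_event(dev);	// one uevent total
	 */
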
@@ -447,7 +680,6 @@ static void notify_ring(struct drm_device *dev,
 
        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
-               dev_priv->gpu_error.hangcheck_count = 0;
                mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                          round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
@@ -464,25 +696,48 @@ static void gen6_pm_rps_work(struct work_struct *work)
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
-       I915_WRITE(GEN6_PMIMR, 0);
+       /* Make sure not to corrupt PMIMR state used by ringbuffer code */
+       I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->rps.lock);
 
-       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
+       if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
-       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                new_delay = dev_priv->rps.cur_delay + 1;
-       else
+
+               /*
+                * For better performance, jump directly
+                * to RPe if we're below it.
+                */
+               if (IS_VALLEYVIEW(dev_priv->dev) &&
+                   dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+                       new_delay = dev_priv->rps.rpe_delay;
+       } else
                new_delay = dev_priv->rps.cur_delay - 1;
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
-       if (!(new_delay > dev_priv->rps.max_delay ||
-             new_delay < dev_priv->rps.min_delay)) {
-               gen6_set_rps(dev_priv->dev, new_delay);
+       if (new_delay >= dev_priv->rps.min_delay &&
+           new_delay <= dev_priv->rps.max_delay) {
+               if (IS_VALLEYVIEW(dev_priv->dev))
+                       valleyview_set_rps(dev_priv->dev, new_delay);
+               else
+                       gen6_set_rps(dev_priv->dev, new_delay);
+       }
+
+       if (IS_VALLEYVIEW(dev_priv->dev)) {
+               /*
+                * On VLV, when we enter RC6 we may not be at the minimum
+                * voltage level, so arm a timer to check.  It should only
+                * fire when there's activity or once after we've entered
+                * RC6, and then won't be re-armed until the next RPS interrupt.
+                */
+               mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
+                                msecs_to_jiffies(100));
        }
 
        mutex_unlock(&dev_priv->rps.hw_lock);
@@ -529,7 +784,7 @@ static void ivybridge_parity_work(struct work_struct *work)
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+       dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -561,7 +816,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
                return;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+       dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -573,25 +828,26 @@ static void snb_gt_irq_handler(struct drm_device *dev,
                               u32 gt_iir)
 {
 
-       if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
-                     GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+       if (gt_iir &
+           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
-       if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+       if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
-       if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+       if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);
 
-       if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
-                     GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-                     GT_RENDER_CS_ERROR_INTERRUPT)) {
+       if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+                     GT_BSD_CS_ERROR_INTERRUPT |
+                     GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }
 
-       if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+       if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_handle_parity_error(dev);
 }
 
+/* Legacy way of handling PM interrupts */
 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
 {
@@ -619,9 +875,9 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
-static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
-                                           u32 hotplug_trigger,
-                                           const u32 *hpd)
+static inline bool intel_hpd_irq_handler(struct drm_device *dev,
+                                        u32 hotplug_trigger,
+                                        const u32 *hpd)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -636,6 +892,7 @@ static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;
 
+               dev_priv->hpd_event_bits |= (1 << i);
                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -643,6 +900,7 @@ static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
+                       dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        ret = true;
                } else {
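
A worked example of the storm logic above, using the values from this patch
(HPD_STORM_DETECT_PERIOD = 1000ms, HPD_STORM_THRESHOLD = 5):

	/*
	 * - an interrupt arriving outside the current window restarts it:
	 *	hpd_last_jiffies = jiffies; hpd_cnt = 0;
	 * - each further interrupt inside the window increments hpd_cnt;
	 * - once hpd_cnt exceeds the threshold within a single window
	 *   (a burst of roughly half a dozen interrupts in under a second),
	 *   the pin is marked HPD_MARK_DISABLED, its bit is dropped from
	 *   hpd_event_bits, and the caller reprograms the hotplug registers
	 *   to mask the pin.
	 */
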
@@ -669,6 +927,38 @@ static void dp_aux_irq_handler(struct drm_device *dev)
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+ * we must be able to deal with other PM interrupts. This is complicated because
+ * of the way in which we use the masks to defer the RPS work (which for
+ * posterity is necessary because of forcewake).
+ */
+static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
+                              u32 pm_iir)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->rps.lock, flags);
+       dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+       if (dev_priv->rps.pm_iir) {
+               I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+               /* never want to mask useful interrupts. (also posting read) */
+               WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+               /* TODO: if queue_work is slow, move it out of the spinlock */
+               queue_work(dev_priv->wq, &dev_priv->rps.work);
+       }
+       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+       if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+               if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+                       notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+
+               if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+                       DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+                       i915_handle_error(dev_priv->dev, false);
+               }
+       }
+}
+
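
The key difference from gen6_queue_rps_work() is the PMIMR pairing: the
interrupt side masks the RPS bits so they cannot re-fire while the work item
is pending, and the worker (gen6_pm_rps_work() above) unmasks only those bits
on completion. A simplified sketch of the two halves:

	/*
	 * irq side (hsw_pm_irq_handler), under rps.lock:
	 *	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	 *		// mask exactly the deferred RPS bits
	 *
	 * worker side (gen6_pm_rps_work), under rps.lock:
	 *	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	 *		// unmask RPS bits only; any non-RPS PMIMR state
	 *		// (e.g. ring interrupts relying on forcewake) is
	 *		// left untouched
	 */
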
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -728,7 +1018,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_trigger) {
-                               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+                               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
@@ -740,7 +1030,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);
 
-               if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+               if (pm_iir & GEN6_PM_RPS_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
 
                I915_WRITE(GTIIR, gt_iir);
@@ -759,14 +1049,16 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
        if (hotplug_trigger) {
-               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
+               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx))
                        ibx_hpd_irq_setup(dev);
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        }
-       if (pch_iir & SDE_AUDIO_POWER_MASK)
-               DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
-                                (pch_iir & SDE_AUDIO_POWER_MASK) >>
-                                SDE_AUDIO_POWER_SHIFT);
+       if (pch_iir & SDE_AUDIO_POWER_MASK) {
+               int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
+                              SDE_AUDIO_POWER_SHIFT);
+               DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+                                port_name(port));
+       }
 
        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev);
@@ -795,10 +1087,64 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
 
-       if (pch_iir & SDE_TRANSB_FIFO_UNDER)
-               DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
-               DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+                                                         false))
+                       DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+
+       if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+                                                         false))
+                       DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+}
+
+static void ivb_err_int_handler(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 err_int = I915_READ(GEN7_ERR_INT);
+
+       if (err_int & ERR_INT_POISON)
+               DRM_ERROR("Poison interrupt\n");
+
+       if (err_int & ERR_INT_FIFO_UNDERRUN_A)
+               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+                       DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+       if (err_int & ERR_INT_FIFO_UNDERRUN_B)
+               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+                       DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
+       if (err_int & ERR_INT_FIFO_UNDERRUN_C)
+               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
+                       DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+
+       I915_WRITE(GEN7_ERR_INT, err_int);
+}
+
+static void cpt_serr_int_handler(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 serr_int = I915_READ(SERR_INT);
+
+       if (serr_int & SERR_INT_POISON)
+               DRM_ERROR("PCH poison interrupt\n");
+
+       if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
+               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+                                                         false))
+                       DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+
+       if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
+               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+                                                         false))
+                       DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+
+       if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
+               if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
+                                                         false))
+                       DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
+
+       I915_WRITE(SERR_INT, serr_int);
 }
 
 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
@@ -808,14 +1154,16 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
        if (hotplug_trigger) {
-               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
+               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt))
                        ibx_hpd_irq_setup(dev);
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        }
-       if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
-               DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
-                                (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
-                                SDE_AUDIO_POWER_SHIFT_CPT);
+       if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
+               int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+                              SDE_AUDIO_POWER_SHIFT_CPT);
+               DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+                                port_name(port));
+       }
 
        if (pch_iir & SDE_AUX_MASK_CPT)
                dp_aux_irq_handler(dev);
@@ -834,6 +1182,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));
+
+       if (pch_iir & SDE_ERROR_CPT)
+               cpt_serr_int_handler(dev);
 }
 
 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
@@ -846,6 +1197,14 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
        atomic_inc(&dev_priv->irq_received);
 
+       /* We get interrupts on unclaimed registers, so check for this before we
+        * do any I915_{READ,WRITE}. */
+       if (IS_HASWELL(dev) &&
+           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+               DRM_ERROR("Unclaimed register before interrupt\n");
+               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       }
+
        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -861,6 +1220,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
                POSTING_READ(SDEIER);
        }
 
+       /* On Haswell, also mask ERR_INT because we don't want to risk
+        * generating "unclaimed register" interrupts from inside the interrupt
+        * handler. */
+       if (IS_HASWELL(dev)) {
+               spin_lock(&dev_priv->irq_lock);
+               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
+
        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -870,11 +1238,14 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
        de_iir = I915_READ(DEIIR);
        if (de_iir) {
+               if (de_iir & DE_ERR_INT_IVB)
+                       ivb_err_int_handler(dev);
+
                if (de_iir & DE_AUX_CHANNEL_A_IVB)
                        dp_aux_irq_handler(dev);
 
                if (de_iir & DE_GSE_IVB)
-                       intel_opregion_gse_intr(dev);
+                       intel_opregion_asle_intr(dev);
 
                for (i = 0; i < 3; i++) {
                        if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
@@ -901,12 +1272,21 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
-               if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+               if (IS_HASWELL(dev))
+                       hsw_pm_irq_handler(dev_priv, pm_iir);
+               else if (pm_iir & GEN6_PM_RPS_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                ret = IRQ_HANDLED;
        }
 
+       if (IS_HASWELL(dev)) {
+               spin_lock(&dev_priv->irq_lock);
+               if (ivb_can_enable_err_int(dev))
+                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
+
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        if (!HAS_PCH_NOP(dev)) {
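
The resulting handler has a fairly rigid ordering; a simplified outline of
ivybridge_irq_handler() after this patch:

	/*
	 *	check/clear FPGA_DBG_RM_NOCLAIM		(HSW, before any MMIO)
	 *	de_ier = I915_READ(DEIER);
	 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	 *	sde_ier = I915_READ(SDEIER);
	 *	I915_WRITE(SDEIER, 0);			(unless HAS_PCH_NOP)
	 *	mask DE_ERR_INT_IVB			(HSW)
	 *	service GTIIR, DEIIR (incl. SDEIIR), GEN6_PMIIR
	 *	unmask DE_ERR_INT_IVB if ivb_can_enable_err_int()	(HSW)
	 *	I915_WRITE(DEIER, de_ier);		// restore master enable
	 *	I915_WRITE(SDEIER, sde_ier);
	 */
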
@@ -921,9 +1301,10 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
-       if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+       if (gt_iir &
+           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
-       if (gt_iir & GT_BSD_USER_INTERRUPT)
+       if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
 }
 
@@ -968,7 +1349,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                dp_aux_irq_handler(dev);
 
        if (de_iir & DE_GSE)
-               intel_opregion_gse_intr(dev);
+               intel_opregion_asle_intr(dev);
 
        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);
@@ -976,6 +1357,17 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);
 
+       if (de_iir & DE_POISON)
+               DRM_ERROR("Poison interrupt\n");
+
+       if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
+               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+                       DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+       if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
+               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+                       DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
@@ -1002,7 +1394,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
                ironlake_handle_rps_change(dev);
 
-       if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+       if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
                gen6_queue_rps_work(dev_priv, pm_iir);
 
        I915_WRITE(GTIIR, gt_iir);
@@ -1222,11 +1614,13 @@ i915_error_state_free(struct kref *error_ref)
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                i915_error_object_free(error->ring[i].batchbuffer);
                i915_error_object_free(error->ring[i].ringbuffer);
+               i915_error_object_free(error->ring[i].ctx);
                kfree(error->ring[i].requests);
        }
 
        kfree(error->active_bo);
        kfree(error->overlay);
+       kfree(error->display);
        kfree(error);
 }
 static void capture_bo(struct drm_i915_error_buffer *err,
@@ -1273,7 +1667,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
        struct drm_i915_gem_object *obj;
        int i = 0;
 
-       list_for_each_entry(obj, head, gtt_list) {
+       list_for_each_entry(obj, head, global_list) {
                if (obj->pin_count == 0)
                        continue;
 
@@ -1415,7 +1809,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
        if (ring->id != RCS || !error->ccid)
                return;
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
                        ering->ctx = i915_error_object_create_sized(dev_priv,
                                                                    obj, 1);
@@ -1552,7 +1946,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
                i++;
        error->active_bo_count = i;
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (obj->pin_count)
                        i++;
        error->pinned_bo_count = i - error->active_bo_count;
@@ -1932,38 +2326,28 @@ ring_last_seqno(struct intel_ring_buffer *ring)
                          struct drm_i915_gem_request, list)->seqno;
 }
 
-static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+static bool
+ring_idle(struct intel_ring_buffer *ring, u32 seqno)
 {
-       if (list_empty(&ring->request_list) ||
-           i915_seqno_passed(ring->get_seqno(ring, false),
-                             ring_last_seqno(ring))) {
-               /* Issue a wake-up to catch stuck h/w. */
-               if (waitqueue_active(&ring->irq_queue)) {
-                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-                                 ring->name);
-                       wake_up_all(&ring->irq_queue);
-                       *err = true;
-               }
-               return true;
-       }
-       return false;
+       return (list_empty(&ring->request_list) ||
+               i915_seqno_passed(seqno, ring_last_seqno(ring)));
 }
 
-static bool semaphore_passed(struct intel_ring_buffer *ring)
+static struct intel_ring_buffer *
+semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
-       struct intel_ring_buffer *signaller;
-       u32 cmd, ipehr, acthd_min;
+       u32 cmd, ipehr, acthd, acthd_min;
 
        ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
        if ((ipehr & ~(0x3 << 16)) !=
            (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
-               return false;
+               return NULL;
 
        /* ACTHD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX.
         */
+       acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
        acthd_min = max((int)acthd - 3 * 4, 0);
        do {
                cmd = ioread32(ring->virtual_start + acthd);
@@ -1972,124 +2356,216 @@ static bool semaphore_passed(struct intel_ring_buffer *ring)
 
                acthd -= 4;
                if (acthd < acthd_min)
-                       return false;
+                       return NULL;
        } while (1);
 
-       signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
-       return i915_seqno_passed(signaller->get_seqno(signaller, false),
-                                ioread32(ring->virtual_start+acthd+4)+1);
+       *seqno = ioread32(ring->virtual_start+acthd+4)+1;
+       return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
 }
 
-static bool kick_ring(struct intel_ring_buffer *ring)
+static int semaphore_passed(struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 tmp = I915_READ_CTL(ring);
-       if (tmp & RING_WAIT) {
-               DRM_ERROR("Kicking stuck wait on %s\n",
-                         ring->name);
-               I915_WRITE_CTL(ring, tmp);
-               return true;
-       }
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct intel_ring_buffer *signaller;
+       u32 seqno, ctl;
 
-       if (INTEL_INFO(dev)->gen >= 6 &&
-           tmp & RING_WAIT_SEMAPHORE &&
-           semaphore_passed(ring)) {
-               DRM_ERROR("Kicking stuck semaphore on %s\n",
-                         ring->name);
-               I915_WRITE_CTL(ring, tmp);
-               return true;
-       }
-       return false;
+       ring->hangcheck.deadlock = true;
+
+       signaller = semaphore_waits_for(ring, &seqno);
+       if (signaller == NULL || signaller->hangcheck.deadlock)
+               return -1;
+
+       /* cursory check for an unkickable deadlock */
+       ctl = I915_READ_CTL(signaller);
+       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
+               return -1;
+
+       return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
 }
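
The tri-state return value is what ring_stuck() below relies on; roughly:

	/*
	 *  1 -> the signaller already passed the awaited seqno: safe to
	 *	kick the waiting ring
	 *  0 -> legitimate wait still in progress: don't penalise the waiter
	 * -1 -> no signaller found, or a wait cycle (A waits on B while B
	 *	waits on A): treat as hung
	 *
	 * The per-ring hangcheck.deadlock flag, cleared on every hangcheck
	 * pass by semaphore_clear_deadlocks(), terminates the recursion
	 * when such a cycle exists.
	 */
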
 
-static bool i915_hangcheck_hung(struct drm_device *dev)
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       if (dev_priv->gpu_error.hangcheck_count++ > 1) {
-               bool hung = true;
+       struct intel_ring_buffer *ring;
+       int i;
 
-               DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-               i915_handle_error(dev, true);
+       for_each_ring(ring, dev_priv, i)
+               ring->hangcheck.deadlock = false;
+}
 
-               if (!IS_GEN2(dev)) {
-                       struct intel_ring_buffer *ring;
-                       int i;
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp;
 
-                       /* Is the chip hanging on a WAIT_FOR_EVENT?
-                        * If so we can simply poke the RB_WAIT bit
-                        * and break the hang. This should work on
-                        * all but the second generation chipsets.
-                        */
-                       for_each_ring(ring, dev_priv, i)
-                               hung &= !kick_ring(ring);
-               }
+       if (ring->hangcheck.acthd != acthd)
+               return active;
 
+       if (IS_GEN2(dev))
                return hung;
+
+       /* Is the chip hanging on a WAIT_FOR_EVENT?
+        * If so we can simply poke the RB_WAIT bit
+        * and break the hang. This should work on
+        * all but the second generation chipsets.
+        */
+       tmp = I915_READ_CTL(ring);
+       if (tmp & RING_WAIT) {
+               DRM_ERROR("Kicking stuck wait on %s\n",
+                         ring->name);
+               I915_WRITE_CTL(ring, tmp);
+               return kick;
        }
 
-       return false;
+       if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+               switch (semaphore_passed(ring)) {
+               default:
+                       return hung;
+               case 1:
+                       DRM_ERROR("Kicking stuck semaphore on %s\n",
+                                 ring->name);
+                       I915_WRITE_CTL(ring, tmp);
+                       return kick;
+               case 0:
+                       return wait;
+               }
+       }
+
+       return hung;
 }
 
 /**
  * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. The first time this is called we simply record
- * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
- * again, we assume the chip is wedged and try to fix it.
+ * batchbuffers in a long time. We keep track of per-ring seqno progress and
+ * if there is no progress, the hangcheck score for that ring is increased.
+ * Further, acthd is inspected to see if the ring is stuck. If it is, we
+ * kick the ring. If we see no progress on three subsequent calls
+ * we assume the chip is wedged and try to fix it by resetting it.
  */
 void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
        struct intel_ring_buffer *ring;
-       bool err = false, idle;
        int i;
+       int busy_count = 0, rings_hung = 0;
+       bool stuck[I915_NUM_RINGS] = { 0 };
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define FIRE 30
 
        if (!i915_enable_hangcheck)
                return;
 
-       memset(acthd, 0, sizeof(acthd));
-       idle = true;
        for_each_ring(ring, dev_priv, i) {
-           idle &= i915_hangcheck_ring_idle(ring, &err);
-           acthd[i] = intel_ring_get_active_head(ring);
-       }
+               u32 seqno, acthd;
+               bool busy = true;
+
+               semaphore_clear_deadlocks(dev_priv);
+
+               seqno = ring->get_seqno(ring, false);
+               acthd = intel_ring_get_active_head(ring);
+
+               if (ring->hangcheck.seqno == seqno) {
+                       if (ring_idle(ring, seqno)) {
+                               if (waitqueue_active(&ring->irq_queue)) {
+                                       /* Issue a wake-up to catch stuck h/w. */
+                                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+                                                 ring->name);
+                                       wake_up_all(&ring->irq_queue);
+                                       ring->hangcheck.score += HUNG;
+                               } else
+                                       busy = false;
+                       } else {
+                               int score;
+
+                               /* We always increment the hangcheck score
+                                * if the ring is busy and still processing
+                                * the same request, so that no single request
+                                * can run indefinitely (such as a chain of
+                                * batches). The only time we do not increment
+                                * the hangcheck score on this ring is if this
+                                * ring is in a legitimate wait for another
+                                * ring. In that case the waiting ring is a
+                                * victim and we want to be sure we catch the
+                                * right culprit. Then every time we do kick
+                                * the ring, add a small increment to the
+                                * score so that we can catch a batch that is
+                                * being repeatedly kicked and so responsible
+                                * for stalling the machine.
+                                */
+                               ring->hangcheck.action = ring_stuck(ring,
+                                                                   acthd);
+
+                               switch (ring->hangcheck.action) {
+                               case wait:
+                                       score = 0;
+                                       break;
+                               case active:
+                                       score = BUSY;
+                                       break;
+                               case kick:
+                                       score = KICK;
+                                       break;
+                               case hung:
+                                       score = HUNG;
+                                       stuck[i] = true;
+                                       break;
+                               }
+                               ring->hangcheck.score += score;
+                       }
+               } else {
+                       /* Gradually reduce the count so that we catch DoS
+                        * attempts across multiple batches.
+                        */
+                       if (ring->hangcheck.score > 0)
+                               ring->hangcheck.score--;
+               }
 
-       /* If all work is done then ACTHD clearly hasn't advanced. */
-       if (idle) {
-               if (err) {
-                       if (i915_hangcheck_hung(dev))
-                               return;
+               ring->hangcheck.seqno = seqno;
+               ring->hangcheck.acthd = acthd;
+               busy_count += busy;
+       }
 
-                       goto repeat;
+       for_each_ring(ring, dev_priv, i) {
+               if (ring->hangcheck.score > FIRE) {
+                       DRM_ERROR("%s on %s\n",
+                                 stuck[i] ? "stuck" : "no progress",
+                                 ring->name);
+                       rings_hung++;
                }
-
-               dev_priv->gpu_error.hangcheck_count = 0;
-               return;
        }
 
-       i915_get_extra_instdone(dev, instdone);
-       if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
-                  sizeof(acthd)) == 0 &&
-           memcmp(dev_priv->gpu_error.prev_instdone, instdone,
-                  sizeof(instdone)) == 0) {
-               if (i915_hangcheck_hung(dev))
-                       return;
-       } else {
-               dev_priv->gpu_error.hangcheck_count = 0;
+       if (rings_hung)
+               return i915_handle_error(dev, true);
 
-               memcpy(dev_priv->gpu_error.last_acthd, acthd,
-                      sizeof(acthd));
-               memcpy(dev_priv->gpu_error.prev_instdone, instdone,
-                      sizeof(instdone));
-       }
+       if (busy_count)
+               /* Reset timer in case chip hangs without another request
+                * being added */
+               mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+                         round_jiffies_up(jiffies +
+                                          DRM_I915_HANGCHECK_JIFFIES));
+}
+
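
A worked example of the scoring, using the constants from this patch
(BUSY = 1, KICK = 5, HUNG = 20, FIRE = 30), with the hangcheck timer firing
every DRM_I915_HANGCHECK_JIFFIES:

	/*
	 * hard hang:  same seqno, ring_stuck() == hung
	 *	check 1: score = 20
	 *	check 2: score = 40 > FIRE -> i915_handle_error(dev, true)
	 *
	 * kickable hang:  score += 5 per check, so roughly seven checks
	 *	before a reset fires -- enough time for the kicks to work.
	 *
	 * long busy batch:  score += 1 per check; once the seqno advances
	 *	the score decays by 1 per check, so well-behaved workloads
	 *	never approach FIRE.
	 */
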
+static void ibx_irq_preinstall(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (HAS_PCH_NOP(dev))
+               return;
 
-repeat:
-       /* Reset timer case chip hangs without another request being added */
-       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+       /* south display irq */
+       I915_WRITE(SDEIMR, 0xffffffff);
+       /*
+        * SDEIER is also touched by the interrupt handler to work around missed
+        * PCH interrupts. Hence we can't update it after the interrupt handler
+        * is enabled - instead we unconditionally enable all PCH interrupt
+        * sources here, but then only unmask them as needed with SDEIMR.
+        */
+       I915_WRITE(SDEIER, 0xffffffff);
+       POSTING_READ(SDEIER);
 }
 
 /* drm_dma.h hooks
@@ -2113,19 +2589,34 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);
 
-       if (HAS_PCH_NOP(dev))
-               return;
+       ibx_irq_preinstall(dev);
+}
 
-       /* south display irq */
-       I915_WRITE(SDEIMR, 0xffffffff);
-       /*
-        * SDEIER is also touched by the interrupt handler to work around missed
-        * PCH interrupts. Hence we can't update it after the interrupt handler
-        * is enabled - instead we unconditionally enable all PCH interrupt
-        * sources here, but then only unmask them as needed with SDEIMR.
-        */
-       I915_WRITE(SDEIER, 0xffffffff);
-       POSTING_READ(SDEIER);
+static void ivybridge_irq_preinstall(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       atomic_set(&dev_priv->irq_received, 0);
+
+       I915_WRITE(HWSTAM, 0xeffe);
+
+       /* XXX hotplug from PCH */
+
+       I915_WRITE(DEIMR, 0xffffffff);
+       I915_WRITE(DEIER, 0x0);
+       POSTING_READ(DEIER);
+
+       /* and GT */
+       I915_WRITE(GTIMR, 0xffffffff);
+       I915_WRITE(GTIER, 0x0);
+       POSTING_READ(GTIER);
+
+       /* Power management */
+       I915_WRITE(GEN6_PMIMR, 0xffffffff);
+       I915_WRITE(GEN6_PMIER, 0x0);
+       POSTING_READ(GEN6_PMIER);
+
+       ibx_irq_preinstall(dev);
 }
 
 static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2201,33 +2692,41 @@ static void ibx_irq_postinstall(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 mask;
 
-       if (HAS_PCH_IBX(dev))
-               mask = SDE_GMBUS | SDE_AUX_MASK;
-       else
-               mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
-
        if (HAS_PCH_NOP(dev))
                return;
 
+       if (HAS_PCH_IBX(dev)) {
+               mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
+                      SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+       } else {
+               mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+
+               I915_WRITE(SERR_INT, I915_READ(SERR_INT));
+       }
+
        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
        I915_WRITE(SDEIMR, ~mask);
 }
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
+       unsigned long irqflags;
+
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-                          DE_AUX_CHANNEL_A;
-       u32 render_irqs;
+                          DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+                          DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
+       u32 gt_irqs;
 
        dev_priv->irq_mask = ~display_mask;
 
        /* should always can generate irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+       I915_WRITE(DEIER, display_mask |
+                         DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
        POSTING_READ(DEIER);
 
        dev_priv->gt_irq_mask = ~0;
@@ -2235,26 +2734,28 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
+       gt_irqs = GT_RENDER_USER_INTERRUPT;
+
        if (IS_GEN6(dev))
-               render_irqs =
-                       GT_USER_INTERRUPT |
-                       GEN6_BSD_USER_INTERRUPT |
-                       GEN6_BLITTER_USER_INTERRUPT;
+               gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
        else
-               render_irqs =
-                       GT_USER_INTERRUPT |
-                       GT_PIPE_NOTIFY |
-                       GT_BSD_USER_INTERRUPT;
-       I915_WRITE(GTIER, render_irqs);
+               gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
+                          ILK_BSD_USER_INTERRUPT;
+
+       I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
        ibx_irq_postinstall(dev);
 
        if (IS_IRONLAKE_M(dev)) {
-               /* Clear & enable PCU event interrupts */
-               I915_WRITE(DEIIR, DE_PCU_EVENT);
-               I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+               /* Enable PCU event interrupts
+                *
+                * spinlocking not required here for correctness since interrupt
+                * setup is guaranteed to run in single-threaded context. But we
+                * need it to make the assert_spin_locked happy. */
+               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
 
        return 0;
@@ -2269,12 +2770,15 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
                DE_PLANEC_FLIP_DONE_IVB |
                DE_PLANEB_FLIP_DONE_IVB |
                DE_PLANEA_FLIP_DONE_IVB |
-               DE_AUX_CHANNEL_A_IVB;
-       u32 render_irqs;
+               DE_AUX_CHANNEL_A_IVB |
+               DE_ERR_INT_IVB;
+       u32 pm_irqs = GEN6_PM_RPS_EVENTS;
+       u32 gt_irqs;
 
        dev_priv->irq_mask = ~display_mask;
 
        /* should always can generate irq */
+       I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        I915_WRITE(DEIER,
@@ -2284,16 +2788,32 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
                   DE_PIPEA_VBLANK_IVB);
        POSTING_READ(DEIER);
 
-       dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+       dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-       render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-               GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIER, render_irqs);
+       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+                 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+       I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
+       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+       if (HAS_VEBOX(dev))
+               pm_irqs |= PM_VEBOX_USER_INTERRUPT |
+                       PM_VEBOX_CS_ERROR_INTERRUPT;
+
+       /* Our enable/disable rps functions may touch these registers so
+        * make sure to set a known state for only the non-RPS bits.
+        * The RMW is extra paranoia since this should be called after being set
+        * to a known state in preinstall.
+        */
+       I915_WRITE(GEN6_PMIMR,
+                  (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
+       I915_WRITE(GEN6_PMIER,
+                  (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
+       POSTING_READ(GEN6_PMIER);
+
        ibx_irq_postinstall(dev);
 
        return 0;
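
A worked example of the PMIMR read-modify-write above, with illustrative bit
values (GEN6_PM_RPS_EVENTS = 0x70, pm_irqs = 0x470; the real register layout
may differ):

	/*
	 *	old IMR		  = 0xffffffff	// all masked, from preinstall
	 *	old | ~RPS_EVENTS = 0xffffffff	// force non-RPS bits masked,
	 *					// keep whatever RPS state the
	 *					// rps code has set
	 *	      & ~pm_irqs  = 0xfffffb8f	// unmask only the bits this
	 *					// function handles
	 *
	 * The PMIER write mirrors it: preserve only the RPS enables owned by
	 * the rps code, then turn on pm_irqs.
	 */
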
@@ -2302,10 +2822,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 gt_irqs;
        u32 enable_mask;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
-       u32 render_irqs;
-       u16 msid;
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2321,13 +2840,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
                I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
                I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
-       /* Hack for broken MSIs on VLV */
-       pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
-       pci_read_config_word(dev->pdev, 0x98, &msid);
-       msid &= 0xff; /* mask out delivery bits */
-       msid |= (1<<14);
-       pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
-
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
 
@@ -2348,9 +2860,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-       render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-               GEN6_BLITTER_USER_INTERRUPT;
-       I915_WRITE(GTIER, render_irqs);
+       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+               GT_BLT_USER_INTERRUPT;
+       I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
        /* ack & enable invalid PTE error interrupts */
@@ -2402,6 +2914,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        I915_WRITE(DEIIR, I915_READ(DEIIR));
+       if (IS_GEN7(dev))
+               I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
@@ -2413,6 +2927,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
        I915_WRITE(SDEIMR, 0xffffffff);
        I915_WRITE(SDEIER, 0x0);
        I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+       if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+               I915_WRITE(SERR_INT, I915_READ(SERR_INT));
 }
 
 static void i8xx_irq_preinstall(struct drm_device * dev)
@@ -2626,7 +3142,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);
 
-       intel_opregion_enable_asle(dev);
+       i915_enable_asle_pipestat(dev);
 
        return 0;
 }
@@ -2716,7 +3232,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_trigger) {
-                               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+                               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
@@ -2860,7 +3376,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
 
-       intel_opregion_enable_asle(dev);
+       i915_enable_asle_pipestat(dev);
 
        return 0;
 }
@@ -2952,13 +3468,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
                                                                  HOTPLUG_INT_STATUS_G4X :
-                                                                 HOTPLUG_INT_STATUS_I965);
+                                                                 HOTPLUG_INT_STATUS_I915);
 
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_trigger) {
-                               if (hotplug_irq_storm_detect(dev, hotplug_trigger,
-                                                           IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
+                               if (intel_hpd_irq_handler(dev, hotplug_trigger,
+                                                           IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
@@ -3113,9 +3629,9 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-               /* Share pre & uninstall handlers with ILK/SNB */
+               /* Share uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_preinstall = ivybridge_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;