drm/i915: s/hotplug_irq_storm_detect/intel_hpd_irq_handler/
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 338e726..378ade0 100644
@@ -70,15 +70,6 @@ static const u32 hpd_status_gen4[] = {
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
 
-static const u32 hpd_status_i965[] = {
-        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
-        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
-        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
-        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
-        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
-        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
-};
-
 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
@@ -95,6 +86,8 @@ static void i915_hpd_irq_setup(struct drm_device *dev);
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -105,6 +98,8 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 static void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -118,6 +113,8 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
        struct intel_crtc *crtc;
        enum pipe pipe;
 
+       assert_spin_locked(&dev_priv->irq_lock);
+
        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
@@ -683,7 +680,6 @@ static void notify_ring(struct drm_device *dev,
 
        wake_up_all(&ring->irq_queue);
        if (i915_enable_hangcheck) {
-               dev_priv->gpu_error.hangcheck_count = 0;
                mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                          round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
@@ -700,24 +696,33 @@ static void gen6_pm_rps_work(struct work_struct *work)
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
-       I915_WRITE(GEN6_PMIMR, 0);
+       /* Make sure not to corrupt PMIMR state used by ringbuffer code */
+       I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->rps.lock);
 
-       if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
+       if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
-       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                new_delay = dev_priv->rps.cur_delay + 1;
-       else
+
+               /*
+                * For better performance, jump directly
+                * to RPe if we're below it.
+                */
+               if (IS_VALLEYVIEW(dev_priv->dev) &&
+                   dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+                       new_delay = dev_priv->rps.rpe_delay;
+       } else
                new_delay = dev_priv->rps.cur_delay - 1;
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
-       if (!(new_delay > dev_priv->rps.max_delay ||
-             new_delay < dev_priv->rps.min_delay)) {
+       if (new_delay >= dev_priv->rps.min_delay &&
+           new_delay <= dev_priv->rps.max_delay) {
                if (IS_VALLEYVIEW(dev_priv->dev))
                        valleyview_set_rps(dev_priv->dev, new_delay);
                else
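The rewritten bounds check accepts exactly the same delays as the old negated
form (De Morgan's law); a throwaway standalone check of that equivalence, with
arbitrary limits:

    #include <assert.h>

    int main(void)
    {
            int min_delay = 3, max_delay = 10;

            for (int new_delay = 0; new_delay < 16; new_delay++) {
                    int old_form = !(new_delay > max_delay || new_delay < min_delay);
                    int new_form = (new_delay >= min_delay && new_delay <= max_delay);
                    assert(old_form == new_form);  /* same accept/reject set */
            }
            return 0;
    }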
@@ -779,7 +784,7 @@ static void ivybridge_parity_work(struct work_struct *work)
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+       dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -811,7 +816,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
                return;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+       dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -823,25 +828,26 @@ static void snb_gt_irq_handler(struct drm_device *dev,
                               u32 gt_iir)
 {
 
-       if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
-                     GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+       if (gt_iir &
+           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
-       if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+       if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
-       if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+       if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);
 
-       if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
-                     GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-                     GT_RENDER_CS_ERROR_INTERRUPT)) {
+       if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+                     GT_BSD_CS_ERROR_INTERRUPT |
+                     GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }
 
-       if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+       if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_handle_parity_error(dev);
 }
 
+/* Legacy way of handling PM interrupts */
 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                                u32 pm_iir)
 {
@@ -869,9 +875,9 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 #define HPD_STORM_DETECT_PERIOD 1000
 #define HPD_STORM_THRESHOLD 5
 
-static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
-                                           u32 hotplug_trigger,
-                                           const u32 *hpd)
+static inline bool intel_hpd_irq_handler(struct drm_device *dev,
+                                        u32 hotplug_trigger,
+                                        const u32 *hpd)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;
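The renamed intel_hpd_irq_handler() keeps the storm detection itself: hotplug
interrupts are counted per pin inside an HPD_STORM_DETECT_PERIOD window, and
going past HPD_STORM_THRESHOLD flags a storm so the caller can re-run
hpd_irq_setup() and mask the noisy pin. A minimal userspace sketch of that
windowed counting (the millisecond clock and names are illustrative; the
driver keys the window off jiffies and per-pin hpd_stats[]):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STORM_PERIOD_MS 1000    /* mirrors HPD_STORM_DETECT_PERIOD */
    #define STORM_THRESHOLD 5       /* mirrors HPD_STORM_THRESHOLD */

    struct hpd_pin_stats {
            uint64_t window_start_ms;
            int count;
    };

    /* Count one hotplug event; report a storm once more than
     * STORM_THRESHOLD events land inside a single window. */
    static bool hpd_storm_detected(struct hpd_pin_stats *s, uint64_t now_ms)
    {
            if (now_ms - s->window_start_ms > STORM_PERIOD_MS) {
                    s->window_start_ms = now_ms;
                    s->count = 0;
            } else if (++s->count > STORM_THRESHOLD) {
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct hpd_pin_stats s = { 0, 0 };

            for (int i = 0; i < 10; i++)    /* an event every 100 ms */
                    if (hpd_storm_detected(&s, 100 * i))
                            printf("storm detected at t=%dms\n", 100 * i);
            return 0;
    }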
@@ -921,6 +927,38 @@ static void dp_aux_irq_handler(struct drm_device *dev)
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+ * we must be able to deal with other PM interrupts. This is complicated because
+ * of the way in which we use the masks to defer the RPS work (which, for the
+ * record, is necessary because of forcewake).
+ */
+static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
+                              u32 pm_iir)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->rps.lock, flags);
+       dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+       if (dev_priv->rps.pm_iir) {
+               I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+               /* never want to mask useful interrupts; the read in the
+                * WARN_ON below doubles as a posting read */
+               WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+               /* TODO: if queue_work is slow, move it out of the spinlock */
+               queue_work(dev_priv->wq, &dev_priv->rps.work);
+       }
+       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+       if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+               if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+                       notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+
+               if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+                       DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+                       i915_handle_error(dev_priv->dev, false);
+               }
+       }
+}
+
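The handshake between hsw_pm_irq_handler() above and gen6_pm_rps_work() is:
the interrupt handler latches RPS events into rps.pm_iir and masks them in
PMIMR, then the worker consumes the latch and unmasks only the RPS bits,
leaving any VEBOX mask state alone. A compressed sketch of that ordering with
plain variables standing in for the registers (mask values invented):

    #include <assert.h>
    #include <stdint.h>

    #define RPS_EVENTS 0x00000070u  /* stand-in for GEN6_PM_RPS_EVENTS */

    static uint32_t pmimr;          /* stand-in for the PMIMR register */
    static uint32_t rps_pm_iir;     /* stand-in for dev_priv->rps.pm_iir */

    /* IRQ side: latch the RPS events and mask them until serviced. */
    static void irq_side(uint32_t pm_iir)
    {
            rps_pm_iir |= pm_iir & RPS_EVENTS;
            pmimr |= rps_pm_iir;
    }

    /* Worker side: consume the latch, unmask only the RPS bits. */
    static uint32_t worker_side(void)
    {
            uint32_t pm_iir = rps_pm_iir;

            rps_pm_iir = 0;
            pmimr &= ~RPS_EVENTS;
            return pm_iir;
    }

    int main(void)
    {
            pmimr = 0x00000100u;            /* pretend a non-RPS source is masked */
            irq_side(0x00000010u);          /* one RPS event arrives */
            assert(pmimr == 0x00000110u);   /* RPS bit masked, other bit kept */
            assert(worker_side() == 0x00000010u);
            assert(pmimr == 0x00000100u);   /* non-RPS mask state survives */
            return 0;
    }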
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -980,7 +1018,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_trigger) {
-                               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+                               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
@@ -992,7 +1030,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);
 
-               if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+               if (pm_iir & GEN6_PM_RPS_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
 
                I915_WRITE(GTIIR, gt_iir);
@@ -1011,7 +1049,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
        if (hotplug_trigger) {
-               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
+               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx))
                        ibx_hpd_irq_setup(dev);
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        }
@@ -1116,7 +1154,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
        if (hotplug_trigger) {
-               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
+               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt))
                        ibx_hpd_irq_setup(dev);
                queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        }
@@ -1185,8 +1223,11 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
        /* On Haswell, also mask ERR_INT because we don't want to risk
         * generating "unclaimed register" interrupts from inside the interrupt
         * handler. */
-       if (IS_HASWELL(dev))
+       if (IS_HASWELL(dev)) {
+               spin_lock(&dev_priv->irq_lock);
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
 
        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
@@ -1231,14 +1272,20 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
        pm_iir = I915_READ(GEN6_PMIIR);
        if (pm_iir) {
-               if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+               if (IS_HASWELL(dev))
+                       hsw_pm_irq_handler(dev_priv, pm_iir);
+               else if (pm_iir & GEN6_PM_RPS_EVENTS)
                        gen6_queue_rps_work(dev_priv, pm_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                ret = IRQ_HANDLED;
        }
 
-       if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev))
-               ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+       if (IS_HASWELL(dev)) {
+               spin_lock(&dev_priv->irq_lock);
+               if (ivb_can_enable_err_int(dev))
+                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
 
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
@@ -1254,9 +1301,10 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
-       if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+       if (gt_iir &
+           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
-       if (gt_iir & GT_BSD_USER_INTERRUPT)
+       if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
 }
 
@@ -1346,7 +1394,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
                ironlake_handle_rps_change(dev);
 
-       if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+       if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
                gen6_queue_rps_work(dev_priv, pm_iir);
 
        I915_WRITE(GTIIR, gt_iir);
@@ -1619,7 +1667,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
        struct drm_i915_gem_object *obj;
        int i = 0;
 
-       list_for_each_entry(obj, head, gtt_list) {
+       list_for_each_entry(obj, head, global_list) {
                if (obj->pin_count == 0)
                        continue;
 
@@ -1761,7 +1809,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
        if (ring->id != RCS || !error->ccid)
                return;
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
                        ering->ctx = i915_error_object_create_sized(dev_priv,
                                                                    obj, 1);
@@ -1898,7 +1946,7 @@ static void i915_capture_error_state(struct drm_device *dev)
        list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
                i++;
        error->active_bo_count = i;
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (obj->pin_count)
                        i++;
        error->pinned_bo_count = i - error->active_bo_count;
@@ -2278,38 +2326,28 @@ ring_last_seqno(struct intel_ring_buffer *ring)
                          struct drm_i915_gem_request, list)->seqno;
 }
 
-static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring,
-                                    u32 ring_seqno, bool *err)
+static bool
+ring_idle(struct intel_ring_buffer *ring, u32 seqno)
 {
-       if (list_empty(&ring->request_list) ||
-           i915_seqno_passed(ring_seqno, ring_last_seqno(ring))) {
-               /* Issue a wake-up to catch stuck h/w. */
-               if (waitqueue_active(&ring->irq_queue)) {
-                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-                                 ring->name);
-                       wake_up_all(&ring->irq_queue);
-                       *err = true;
-               }
-               return true;
-       }
-       return false;
+       return (list_empty(&ring->request_list) ||
+               i915_seqno_passed(seqno, ring_last_seqno(ring)));
 }
 
-static bool semaphore_passed(struct intel_ring_buffer *ring)
+static struct intel_ring_buffer *
+semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
-       struct intel_ring_buffer *signaller;
-       u32 cmd, ipehr, acthd_min;
+       u32 cmd, ipehr, acthd, acthd_min;
 
        ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
        if ((ipehr & ~(0x3 << 16)) !=
            (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
-               return false;
+               return NULL;
 
        /* ACTHD is likely pointing to the dword after the actual command,
         * so scan backwards until we find the MBOX.
         */
+       acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
        acthd_min = max((int)acthd - 3 * 4, 0);
        do {
                cmd = ioread32(ring->virtual_start + acthd);
@@ -2318,128 +2356,216 @@ static bool semaphore_passed(struct intel_ring_buffer *ring)
 
                acthd -= 4;
                if (acthd < acthd_min)
-                       return false;
+                       return NULL;
        } while (1);
 
-       signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
-       return i915_seqno_passed(signaller->get_seqno(signaller, false),
-                                ioread32(ring->virtual_start+acthd+4)+1);
+       *seqno = ioread32(ring->virtual_start+acthd+4)+1;
+       return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
 }
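semaphore_waits_for() scans backwards because ACTHD usually points at the
dword after the executing command, and the search is bounded to three dwords
(acthd_min above). A standalone sketch of that bounded backward search (the
command encoding is a placeholder, not the real MI_SEMAPHORE_MBOX value):

    #include <stdint.h>
    #include <stdio.h>

    #define MBOX_CMD 0x0e008002u    /* placeholder command encoding */

    /* Walk back from 'head' (a byte offset) at most three dwords. */
    static int find_mbox(const uint32_t *ring, int head)
    {
            int min = head - 3 * 4 > 0 ? head - 3 * 4 : 0;

            for (int pos = head; pos >= min; pos -= 4)
                    if (ring[pos / 4] == MBOX_CMD)
                            return pos;
            return -1;      /* no MBOX found: not a semaphore wait */
    }

    int main(void)
    {
            uint32_t ring[8] = { 0, 0, 0, MBOX_CMD, 0x1234, 0, 0, 0 };

            printf("MBOX at byte offset %d\n", find_mbox(ring, 5 * 4));
            return 0;
    }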
 
-static bool kick_ring(struct intel_ring_buffer *ring)
+static int semaphore_passed(struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 tmp = I915_READ_CTL(ring);
-       if (tmp & RING_WAIT) {
-               DRM_ERROR("Kicking stuck wait on %s\n",
-                         ring->name);
-               I915_WRITE_CTL(ring, tmp);
-               return true;
-       }
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct intel_ring_buffer *signaller;
+       u32 seqno, ctl;
 
-       if (INTEL_INFO(dev)->gen >= 6 &&
-           tmp & RING_WAIT_SEMAPHORE &&
-           semaphore_passed(ring)) {
-               DRM_ERROR("Kicking stuck semaphore on %s\n",
-                         ring->name);
-               I915_WRITE_CTL(ring, tmp);
-               return true;
-       }
-       return false;
+       ring->hangcheck.deadlock = true;
+
+       signaller = semaphore_waits_for(ring, &seqno);
+       if (signaller == NULL || signaller->hangcheck.deadlock)
+               return -1;
+
+       /* cursory check for an unkickable deadlock */
+       ctl = I915_READ_CTL(signaller);
+       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
+               return -1;
+
+       return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
 }
 
-static bool i915_hangcheck_ring_hung(struct intel_ring_buffer *ring)
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
-       if (IS_GEN2(ring->dev))
-               return false;
+       struct intel_ring_buffer *ring;
+       int i;
 
-       /* Is the chip hanging on a WAIT_FOR_EVENT?
-        * If so we can simply poke the RB_WAIT bit
-        * and break the hang. This should work on
-        * all but the second generation chipsets.
-        */
-       return !kick_ring(ring);
+       for_each_ring(ring, dev_priv, i)
+               ring->hangcheck.deadlock = false;
 }
 
-static bool i915_hangcheck_hung(struct drm_device *dev)
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp;
 
-       if (dev_priv->gpu_error.hangcheck_count++ > 1) {
-               bool hung = true;
-               struct intel_ring_buffer *ring;
-               int i;
+       if (ring->hangcheck.acthd != acthd)
+               return active;
 
-               DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-               i915_handle_error(dev, true);
+       if (IS_GEN2(dev))
+               return hung;
 
-               for_each_ring(ring, dev_priv, i)
-                       hung &= i915_hangcheck_ring_hung(ring);
+       /* Is the chip hanging on a WAIT_FOR_EVENT?
+        * If so we can simply poke the RB_WAIT bit
+        * and break the hang. This should work on
+        * all but the second generation chipsets.
+        */
+       tmp = I915_READ_CTL(ring);
+       if (tmp & RING_WAIT) {
+               DRM_ERROR("Kicking stuck wait on %s\n",
+                         ring->name);
+               I915_WRITE_CTL(ring, tmp);
+               return kick;
+       }
 
-               return hung;
+       if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+               switch (semaphore_passed(ring)) {
+               default:
+                       return hung;
+               case 1:
+                       DRM_ERROR("Kicking stuck semaphore on %s\n",
+                                 ring->name);
+                       I915_WRITE_CTL(ring, tmp);
+                       return kick;
+               case 0:
+                       return wait;
+               }
        }
 
-       return false;
+       return hung;
 }
 
 /**
  * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. The first time this is called we simply record
- * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
- * again, we assume the chip is wedged and try to fix it.
+ * batchbuffers in a long time. We keep track of seqno progress per ring
+ * and, if there is no progress, the hangcheck score for that ring is
+ * increased. Further, ACTHD is inspected to see if the ring is stuck; if
+ * it is, we kick the ring. If we see no progress on three subsequent
+ * calls we assume the chip is wedged and try to fix it by resetting it.
  */
 void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       bool err = false, idle;
        int i;
-       u32 seqno[I915_NUM_RINGS];
-       bool work_done;
+       int busy_count = 0, rings_hung = 0;
+       bool stuck[I915_NUM_RINGS] = { 0 };
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define FIRE 30
 
        if (!i915_enable_hangcheck)
                return;
 
-       idle = true;
        for_each_ring(ring, dev_priv, i) {
-               seqno[i] = ring->get_seqno(ring, false);
-               idle &= i915_hangcheck_ring_idle(ring, seqno[i], &err);
-       }
-
-       /* If all work is done then ACTHD clearly hasn't advanced. */
-       if (idle) {
-               if (err) {
-                       if (i915_hangcheck_hung(dev))
-                               return;
-
-                       goto repeat;
+               u32 seqno, acthd;
+               bool busy = true;
+
+               semaphore_clear_deadlocks(dev_priv);
+
+               seqno = ring->get_seqno(ring, false);
+               acthd = intel_ring_get_active_head(ring);
+
+               if (ring->hangcheck.seqno == seqno) {
+                       if (ring_idle(ring, seqno)) {
+                               if (waitqueue_active(&ring->irq_queue)) {
+                                       /* Issue a wake-up to catch stuck h/w. */
+                                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+                                                 ring->name);
+                                       wake_up_all(&ring->irq_queue);
+                                       ring->hangcheck.score += HUNG;
+                               } else
+                                       busy = false;
+                       } else {
+                               int score;
+
+                               /* We always increment the hangcheck score
+                                * if the ring is busy and still processing
+                                * the same request, so that no single request
+                                * can run indefinitely (such as a chain of
+                                * batches). The only time we do not increment
+                                * the hangcheck score on this ring is when
+                                * the ring is in a legitimate wait for
+                                * another ring. In that case the waiting ring
+                                * is a victim and we want to be sure we catch
+                                * the right culprit. Then every time we do
+                                * kick the ring, we add a small increment to
+                                * the score so that we can catch a batch that
+                                * is being repeatedly kicked and so is
+                                * responsible for stalling the machine.
+                                */
+                               ring->hangcheck.action = ring_stuck(ring,
+                                                                   acthd);
+
+                               switch (ring->hangcheck.action) {
+                               case wait:
+                                       score = 0;
+                                       break;
+                               case active:
+                                       score = BUSY;
+                                       break;
+                               case kick:
+                                       score = KICK;
+                                       break;
+                               case hung:
+                                       score = HUNG;
+                                       stuck[i] = true;
+                                       break;
+                               }
+                               ring->hangcheck.score += score;
+                       }
+               } else {
+                       /* Gradually reduce the count so that we catch DoS
+                        * attempts across multiple batches.
+                        */
+                       if (ring->hangcheck.score > 0)
+                               ring->hangcheck.score--;
                }
 
-               dev_priv->gpu_error.hangcheck_count = 0;
-               return;
+               ring->hangcheck.seqno = seqno;
+               ring->hangcheck.acthd = acthd;
+               busy_count += busy;
        }
 
-       work_done = false;
        for_each_ring(ring, dev_priv, i) {
-               if (ring->hangcheck.seqno != seqno[i]) {
-                       work_done = true;
-                       ring->hangcheck.seqno = seqno[i];
+               if (ring->hangcheck.score > FIRE) {
+                       DRM_ERROR("%s on %s\n",
+                                 stuck[i] ? "stuck" : "no progress",
+                                 ring->name);
+                       rings_hung++;
                }
        }
 
-       if (!work_done) {
-               if (i915_hangcheck_hung(dev))
-                       return;
-       } else {
-               dev_priv->gpu_error.hangcheck_count = 0;
-       }
+       if (rings_hung)
+               return i915_handle_error(dev, true);
 
-repeat:
-       /* Reset timer case chip hangs without another request being added */
-       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+       if (busy_count)
+               /* Reset timer in case the chip hangs without another
+                * request being added */
+               mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+                         round_jiffies_up(jiffies +
+                                          DRM_I915_HANGCHECK_JIFFIES));
+}
+
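With the weights above, a ring that stops making seqno progress and is
diagnosed hung crosses FIRE on its third timer tick: the first tick only
records the new baseline seqno, and the next two HUNG increments (20 + 20 = 40)
exceed FIRE (30), matching the "three subsequent calls" in the comment. A
merely busy ring accruing BUSY per tick would need about thirty ticks. A
standalone walk-through of the arithmetic:

    #include <stdio.h>

    #define BUSY 1
    #define KICK 5
    #define HUNG 20
    #define FIRE 30

    int main(void)
    {
            int score = 0;

            /* Tick 1: seqno differs from the recorded baseline, so the
             * score only decays and the new seqno is recorded. */
            /* Ticks 2 and 3: same seqno, ring_stuck() reports hung. */
            for (int tick = 2; tick <= 3; tick++) {
                    score += HUNG;
                    printf("tick %d: score=%d%s\n", tick, score,
                           score > FIRE ? "  -> i915_handle_error()" : "");
            }
            return 0;
    }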
+static void ibx_irq_preinstall(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (HAS_PCH_NOP(dev))
+               return;
+
+       /* south display irq */
+       I915_WRITE(SDEIMR, 0xffffffff);
+       /*
+        * SDEIER is also touched by the interrupt handler to work around missed
+        * PCH interrupts. Hence we can't update it after the interrupt handler
+        * is enabled - instead we unconditionally enable all PCH interrupt
+        * sources here, but then only unmask them as needed with SDEIMR.
+        */
+       I915_WRITE(SDEIER, 0xffffffff);
+       POSTING_READ(SDEIER);
 }
 
 /* drm_dma.h hooks
@@ -2463,19 +2589,34 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);
 
-       if (HAS_PCH_NOP(dev))
-               return;
+       ibx_irq_preinstall(dev);
+}
 
-       /* south display irq */
-       I915_WRITE(SDEIMR, 0xffffffff);
-       /*
-        * SDEIER is also touched by the interrupt handler to work around missed
-        * PCH interrupts. Hence we can't update it after the interrupt handler
-        * is enabled - instead we unconditionally enable all PCH interrupt
-        * sources here, but then only unmask them as needed with SDEIMR.
-        */
-       I915_WRITE(SDEIER, 0xffffffff);
-       POSTING_READ(SDEIER);
+static void ivybridge_irq_preinstall(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+       atomic_set(&dev_priv->irq_received, 0);
+
+       I915_WRITE(HWSTAM, 0xeffe);
+
+       /* XXX hotplug from PCH */
+
+       I915_WRITE(DEIMR, 0xffffffff);
+       I915_WRITE(DEIER, 0x0);
+       POSTING_READ(DEIER);
+
+       /* and GT */
+       I915_WRITE(GTIMR, 0xffffffff);
+       I915_WRITE(GTIER, 0x0);
+       POSTING_READ(GTIER);
+
+       /* Power management */
+       I915_WRITE(GEN6_PMIMR, 0xffffffff);
+       I915_WRITE(GEN6_PMIER, 0x0);
+       POSTING_READ(GEN6_PMIER);
+
+       ibx_irq_preinstall(dev);
 }
 
 static void valleyview_irq_preinstall(struct drm_device *dev)
@@ -2569,20 +2710,23 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
+       unsigned long irqflags;
+
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
                           DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
                           DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
-       u32 render_irqs;
+       u32 gt_irqs;
 
        dev_priv->irq_mask = ~display_mask;
 
        /* should always be able to generate an irq */
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+       I915_WRITE(DEIER, display_mask |
+                         DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
        POSTING_READ(DEIER);
 
        dev_priv->gt_irq_mask = ~0;
@@ -2590,26 +2734,28 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
+       gt_irqs = GT_RENDER_USER_INTERRUPT;
+
        if (IS_GEN6(dev))
-               render_irqs =
-                       GT_USER_INTERRUPT |
-                       GEN6_BSD_USER_INTERRUPT |
-                       GEN6_BLITTER_USER_INTERRUPT;
+               gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
        else
-               render_irqs =
-                       GT_USER_INTERRUPT |
-                       GT_PIPE_NOTIFY |
-                       GT_BSD_USER_INTERRUPT;
-       I915_WRITE(GTIER, render_irqs);
+               gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
+                          ILK_BSD_USER_INTERRUPT;
+
+       I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
        ibx_irq_postinstall(dev);
 
        if (IS_IRONLAKE_M(dev)) {
-               /* Clear & enable PCU event interrupts */
-               I915_WRITE(DEIIR, DE_PCU_EVENT);
-               I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+               /* Enable PCU event interrupts
+                *
+                * spinlocking not required here for correctness since interrupt
+                * setup is guaranteed to run in single-threaded context. But we
+                * need it to make the assert_spin_locked happy. */
+               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
 
        return 0;
@@ -2626,7 +2772,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
                DE_PLANEA_FLIP_DONE_IVB |
                DE_AUX_CHANNEL_A_IVB |
                DE_ERR_INT_IVB;
-       u32 render_irqs;
+       u32 pm_irqs = GEN6_PM_RPS_EVENTS;
+       u32 gt_irqs;
 
        dev_priv->irq_mask = ~display_mask;
 
@@ -2641,16 +2788,32 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
                   DE_PIPEA_VBLANK_IVB);
        POSTING_READ(DEIER);
 
-       dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+       dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-       render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-               GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIER, render_irqs);
+       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+                 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+       I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
+       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+       if (HAS_VEBOX(dev))
+               pm_irqs |= PM_VEBOX_USER_INTERRUPT |
+                       PM_VEBOX_CS_ERROR_INTERRUPT;
+
+       /* Our enable/disable rps functions may touch these registers so
+        * make sure to set a known state for only the non-RPS bits.
+        * The RMW is extra paranoia since this should be called after being set
+        * to a known state in preinstall.
+        */
+       I915_WRITE(GEN6_PMIMR,
+                  (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
+       I915_WRITE(GEN6_PMIER,
+                  (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
+       POSTING_READ(GEN6_PMIER);
+
        ibx_irq_postinstall(dev);
 
        return 0;
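Since GEN6_PM_RPS_EVENTS is a subset of pm_irqs, the PMIMR expression above
collapses to ~pm_irqs no matter what the register held before, i.e.
(x | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs == ~pm_irqs for all x; that is what makes
the RMW "extra paranoia" rather than load-bearing. A quick standalone check
(mask values invented):

    #include <assert.h>
    #include <stdint.h>

    #define RPS_EVENTS 0x00000070u  /* stand-in for GEN6_PM_RPS_EVENTS */
    #define VEBOX_IRQS 0x00001400u  /* stand-in for the PM_VEBOX_* bits */

    int main(void)
    {
            uint32_t pm_irqs = RPS_EVENTS | VEBOX_IRQS;

            /* Whatever PMIMR held before... */
            for (uint32_t imr = 0; imr < 0x2000u; imr += 0x99u) {
                    uint32_t pmimr = (imr | ~RPS_EVENTS) & ~pm_irqs;

                    /* ...the result masks everything except pm_irqs. */
                    assert(pmimr == ~pm_irqs);
            }
            return 0;
    }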
@@ -2659,9 +2822,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 gt_irqs;
        u32 enable_mask;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
-       u32 render_irqs;
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2697,9 +2860,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-       render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-               GEN6_BLITTER_USER_INTERRUPT;
-       I915_WRITE(GTIER, render_irqs);
+       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+               GT_BLT_USER_INTERRUPT;
+       I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
        /* ack & enable invalid PTE error interrupts */
@@ -3069,7 +3232,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_trigger) {
-                               if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+                               if (intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
@@ -3305,13 +3468,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
                                                                  HOTPLUG_INT_STATUS_G4X :
-                                                                 HOTPLUG_INT_STATUS_I965);
+                                                                 HOTPLUG_INT_STATUS_I915);
 
                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                  hotplug_status);
                        if (hotplug_trigger) {
-                               if (hotplug_irq_storm_detect(dev, hotplug_trigger,
-                                                           IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
+                               if (intel_hpd_irq_handler(dev, hotplug_trigger,
+                                                           IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
@@ -3466,9 +3629,9 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-               /* Share pre & uninstall handlers with ILK/SNB */
+               /* Share uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ironlake_irq_preinstall;
+               dev->driver->irq_preinstall = ivybridge_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;