drm/i915: Do not keep postponing the idle-work
[cascardo/linux.git] drivers/gpu/drm/i915/i915_gem.c
index 452178c..78057fa 100644
@@ -54,6 +54,9 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
 
 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return false;
+
        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;
 
@@ -505,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 
        *needs_clflush = 0;
 
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return -EINVAL;
 
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -606,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
        return ret ? - EFAULT : 0;
 }
 
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+                uint64_t page_base, int page_offset,
+                char __user *user_data,
+                unsigned long length, bool pwrite)
+{
+       void __iomem *ioaddr;
+       void *vaddr;
+       uint64_t unwritten;
+
+       ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
+       /* We can use the cpu mem copy function because this is X86. */
+       vaddr = (void __force *)ioaddr + page_offset;
+       if (pwrite)
+               unwritten = __copy_from_user(vaddr, user_data, length);
+       else
+               unwritten = __copy_to_user(user_data, vaddr, length);
+
+       io_mapping_unmap(ioaddr);
+       return unwritten;
+}
+
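
For orientation, a minimal caller-side sketch (the helper name example_gtt_copy_page
and its locking are illustrative assumptions, not part of this patch): the copy may
fault in user memory, so it has to run with struct_mutex dropped, exactly as the GTT
pread/pwrite paths below arrange.

    /* Illustrative only: copy one page between the aperture and userspace
     * without holding struct_mutex, via slow_user_access() above. */
    static int example_gtt_copy_page(struct drm_device *dev,
                                     struct io_mapping *mappable,
                                     uint64_t page_base, int page_offset,
                                     char __user *user_data,
                                     unsigned long len, bool pwrite)
    {
            int ret = 0;

            mutex_unlock(&dev->struct_mutex);       /* the copy may fault/sleep */
            if (slow_user_access(mappable, page_base, page_offset,
                                 user_data, len, pwrite))
                    ret = -EFAULT;
            mutex_lock(&dev->struct_mutex);

            return ret;
    }
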
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+                  struct drm_i915_gem_object *obj, uint64_t size,
+                  uint64_t data_offset, uint64_t data_ptr)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_mm_node node;
+       char __user *user_data;
+       uint64_t remain;
+       uint64_t offset;
+       int ret;
+
+       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+       if (ret) {
+               ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+               if (ret)
+                       goto out;
+
+               ret = i915_gem_object_get_pages(obj);
+               if (ret) {
+                       remove_mappable_node(&node);
+                       goto out;
+               }
+
+               i915_gem_object_pin_pages(obj);
+       } else {
+               node.start = i915_gem_obj_ggtt_offset(obj);
+               node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret)
+                       goto out_unpin;
+       }
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (ret)
+               goto out_unpin;
+
+       user_data = u64_to_user_ptr(data_ptr);
+       remain = size;
+       offset = data_offset;
+
+       mutex_unlock(&dev->struct_mutex);
+       if (likely(!i915.prefault_disable)) {
+               ret = fault_in_multipages_writeable(user_data, remain);
+               if (ret) {
+                       mutex_lock(&dev->struct_mutex);
+                       goto out_unpin;
+               }
+       }
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               u32 page_base = node.start;
+               unsigned page_offset = offset_in_page(offset);
+               unsigned page_length = PAGE_SIZE - page_offset;
+               page_length = remain < page_length ? remain : page_length;
+               if (node.allocated) {
+                       wmb();
+                       ggtt->base.insert_page(&ggtt->base,
+                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                              node.start,
+                                              I915_CACHE_NONE, 0);
+                       wmb();
+               } else {
+                       page_base += offset & PAGE_MASK;
+               }
+               /* This is a slow read/write as it reads from and writes to
+                * user memory, which may result in page faults, and so we
+                * cannot perform this under struct_mutex.
+                */
+               if (slow_user_access(ggtt->mappable, page_base,
+                                    page_offset, user_data,
+                                    page_length, false)) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+               /* The user has modified the object whilst we tried
+                * reading from it, and we now have no idea what domain
+                * the pages should be in. As we have just been touching
+                * them directly, flush everything back to the GTT
+                * domain.
+                */
+               ret = i915_gem_object_set_to_gtt_domain(obj, false);
+       }
+
+out_unpin:
+       if (node.allocated) {
+               wmb();
+               ggtt->base.clear_range(&ggtt->base,
+                                      node.start, node.size,
+                                      true);
+               i915_gem_object_unpin_pages(obj);
+               remove_mappable_node(&node);
+       } else {
+               i915_gem_object_ggtt_unpin(obj);
+       }
+out:
+       return ret;
+}
+
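
The function above avoids pinning the whole object into the limited mappable
aperture: when the full pin fails, it reserves a single page of GTT address space
and repoints that window at successive pages of the object, copying through it via
slow_user_access() with struct_mutex dropped. A condensed, illustrative skeleton of
that technique (variable names borrowed from the function above; error handling and
the userspace copy are elided):

    struct drm_mm_node node;
    uint64_t offset = 0;

    insert_mappable_node(dev_priv, &node, PAGE_SIZE);   /* reserve one GTT page */
    while (remain > 0) {
            wmb();
            ggtt->base.insert_page(&ggtt->base,         /* repoint the window */
                                   i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                   node.start, I915_CACHE_NONE, 0);
            wmb();
            /* ... slow_user_access() through ggtt->mappable at node.start ... */
            offset += PAGE_SIZE;
            remain -= PAGE_SIZE;
    }
    ggtt->base.clear_range(&ggtt->base, node.start, node.size, true);
    remove_mappable_node(&node);
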
 static int
 i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
@@ -621,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;
 
+       if (!i915_gem_object_has_struct_page(obj))
+               return -ENODEV;
+
        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
 
@@ -732,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       /* prime objects have no backing filp to GEM pread/pwrite
-        * pages from.
-        */
-       if (!obj->base.filp) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        trace_i915_gem_object_pread(obj, args->offset, args->size);
 
        ret = i915_gem_shmem_pread(dev, obj, args, file);
 
+       /* pread for non shmem backed objects */
+       if (ret == -EFAULT || ret == -ENODEV)
+               ret = i915_gem_gtt_pread(dev, obj, args->size,
+                                       args->offset, args->data_ptr);
+
 out:
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -789,10 +928,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                         struct drm_file *file)
 {
        struct i915_ggtt *ggtt = &i915->ggtt;
+       struct drm_device *dev = obj->base.dev;
        struct drm_mm_node node;
        uint64_t remain, offset;
        char __user *user_data;
        int ret;
+       bool hit_slow_path = false;
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               return -EFAULT;
 
        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret) {
@@ -810,16 +954,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
        } else {
                node.start = i915_gem_obj_ggtt_offset(obj);
                node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret)
+                       goto out_unpin;
        }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;
 
-       ret = i915_gem_object_put_fence(obj);
-       if (ret)
-               goto out_unpin;
-
        intel_fb_obj_invalidate(obj, ORIGIN_GTT);
        obj->dirty = true;
 
@@ -849,11 +992,23 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
+                * If the object is not shmem backed, we retry with the
+                * path that handles page faults.
                 */
                if (fast_user_write(ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
-                       ret = -EFAULT;
-                       goto out_flush;
+                       hit_slow_path = true;
+                       mutex_unlock(&dev->struct_mutex);
+                       if (slow_user_access(ggtt->mappable,
+                                            page_base,
+                                            page_offset, user_data,
+                                            page_length, true)) {
+                               ret = -EFAULT;
+                               mutex_lock(&dev->struct_mutex);
+                               goto out_flush;
+                       }
+
+                       mutex_lock(&dev->struct_mutex);
                }
 
                remain -= page_length;
@@ -862,6 +1017,19 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
        }
 
 out_flush:
+       if (hit_slow_path) {
+               if (ret == 0 &&
+                   (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+                       /* The user has modified the object whilst we tried
+                        * reading from it, and we now have no idea what domain
+                        * the pages should be in. As we have just been touching
+                        * them directly, flush everything back to the GTT
+                        * domain.
+                        */
+                       ret = i915_gem_object_set_to_gtt_domain(obj, false);
+               }
+       }
+
        intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
        if (node.allocated) {
@@ -1121,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       /* prime objects have no backing filp to GEM pread/pwrite
-        * pages from.
-        */
-       if (!obj->base.filp) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
        ret = -EFAULT;
@@ -1138,8 +1298,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj->tiling_mode == I915_TILING_NONE &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+       if (!i915_gem_object_has_struct_page(obj) ||
            cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
@@ -1147,11 +1306,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                 * textures). Fall back to the shmem path in that case. */
        }
 
-       if (ret == -EFAULT || ret == -ENOSPC) {
+       if (ret == -EFAULT) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
-               else
+               else if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+               else
+                       ret = -ENODEV;
        }
 
 out:
@@ -1182,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
        return 0;
 }
 
-static void fake_irq(unsigned long data)
-{
-       wake_up_process((struct task_struct *)data);
-}
-
-static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_engine_cs *engine)
-{
-       return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
-}
-
 static unsigned long local_clock_us(unsigned *cpu)
 {
        unsigned long t;
@@ -1225,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu)
        return this_cpu != cpu;
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+                        int state, unsigned long timeout_us)
 {
-       unsigned long timeout;
        unsigned cpu;
 
        /* When waiting for high frequency requests, e.g. during synchronous
@@ -1240,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
         * takes to sleep on a request, on the order of a microsecond.
         */
 
-       if (req->engine->irq_refcount)
-               return -EBUSY;
-
-       /* Only spin if we know the GPU is processing this request */
-       if (!i915_gem_request_started(req, true))
-               return -EAGAIN;
-
-       timeout = local_clock_us(&cpu) + 5;
-       while (!need_resched()) {
-               if (i915_gem_request_completed(req, true))
-                       return 0;
+       timeout_us += local_clock_us(&cpu);
+       do {
+               if (i915_gem_request_completed(req))
+                       return true;
 
                if (signal_pending_state(state, current))
                        break;
 
-               if (busywait_stop(timeout, cpu))
+               if (busywait_stop(timeout_us, cpu))
                        break;
 
                cpu_relax_lowlatency();
-       }
-
-       if (i915_gem_request_completed(req, false))
-               return 0;
+       } while (!need_resched());
 
-       return -EAGAIN;
+       return false;
 }
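
The rewritten spinner is a bounded poll with a caller-supplied budget in
microseconds: it returns as soon as the request completes, and gives up when a
signal is pending, the per-cpu clock says the budget is spent, or the scheduler
wants the CPU back. A generic sketch of the same pattern (the callback-based shape
is an illustrative assumption; the real code above checks the request directly):

    /* Illustrative bounded busy-wait, reusing the helpers defined above. */
    static bool example_poll(bool (*done)(const void *), const void *arg,
                             unsigned long timeout_us)
    {
            unsigned cpu;

            timeout_us += local_clock_us(&cpu);     /* absolute deadline */
            do {
                    if (done(arg))
                            return true;
                    if (signal_pending(current))
                            break;
                    if (busywait_stop(timeout_us, cpu))
                            break;                  /* budget spent or migrated */
                    cpu_relax_lowlatency();
            } while (!need_resched());

            return false;
    }
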
 
 /**
@@ -1289,25 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        s64 *timeout,
                        struct intel_rps_client *rps)
 {
-       struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-       struct drm_i915_private *dev_priv = req->i915;
-       const bool irq_test_in_progress =
-               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       DEFINE_WAIT(wait);
-       unsigned long timeout_expire;
+       DEFINE_WAIT(reset);
+       struct intel_wait wait;
+       unsigned long timeout_remain;
        s64 before = 0; /* Only to silence a compiler warning. */
-       int ret;
+       int ret = 0;
 
-       WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+       might_sleep();
 
        if (list_empty(&req->list))
                return 0;
 
-       if (i915_gem_request_completed(req, true))
+       if (i915_gem_request_completed(req))
                return 0;
 
-       timeout_expire = 0;
+       timeout_remain = MAX_SCHEDULE_TIMEOUT;
        if (timeout) {
                if (WARN_ON(*timeout < 0))
                        return -EINVAL;
@@ -1315,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                if (*timeout == 0)
                        return -ETIME;
 
-               timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
 
                /*
                 * Record current time in case interrupted by signal, or wedged.
@@ -1323,75 +1460,70 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                before = ktime_get_raw_ns();
        }
 
-       if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
-
        trace_i915_gem_request_wait_begin(req);
 
-       /* Optimistic spin for the next jiffie before touching IRQs */
-       ret = __i915_spin_request(req, state);
-       if (ret == 0)
-               goto out;
-
-       if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
-               ret = -ENODEV;
-               goto out;
-       }
+       if (INTEL_INFO(req->i915)->gen >= 6)
+               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
 
-       for (;;) {
-               struct timer_list timer;
+       /* Optimistic spin for the next ~jiffie before touching IRQs */
+       if (i915_spin_request(req, state, 5))
+               goto complete;
 
-               prepare_to_wait(&engine->irq_queue, &wait, state);
+       set_current_state(state);
+       add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-               /* We need to check whether any gpu reset happened in between
-                * the request being submitted and now. If a reset has occurred,
-                * the request is effectively complete (we either are in the
-                * process of or have discarded the rendering and completely
-                * reset the GPU. The results of the request are lost and we
-                * are free to continue on with the original operation.
+       intel_wait_init(&wait, req->seqno);
+       if (intel_engine_add_wait(req->engine, &wait))
+               /* In order to check that we haven't missed the interrupt
+                * as we enabled it, we need to kick ourselves to do a
+                * coherent check on the seqno before we sleep.
                 */
-               if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
-                       ret = 0;
-                       break;
-               }
-
-               if (i915_gem_request_completed(req, false)) {
-                       ret = 0;
-                       break;
-               }
+               goto wakeup;
 
+       for (;;) {
                if (signal_pending_state(state, current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
 
-               if (timeout && time_after_eq(jiffies, timeout_expire)) {
+               /* Ensure that even if the GPU hangs, we get woken up.
+                *
+                * However, note that if no one is waiting, we never notice
+                * a gpu hang. Eventually, we will have to wait for a resource
+                * held by the GPU and so trigger a hangcheck. In the most
+                * pathological case, this will be upon memory starvation!
+                */
+               i915_queue_hangcheck(req->i915);
+
+               timeout_remain = io_schedule_timeout(timeout_remain);
+               if (timeout_remain == 0) {
                        ret = -ETIME;
                        break;
                }
 
-               timer.function = NULL;
-               if (timeout || missed_irq(dev_priv, engine)) {
-                       unsigned long expire;
+               if (intel_wait_complete(&wait))
+                       break;
 
-                       setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
-                       mod_timer(&timer, expire);
-               }
+               set_current_state(state);
 
-               io_schedule();
+wakeup:
+               /* Carefully check if the request is complete, giving time
+                * for the seqno to be visible following the interrupt.
+                * We also have to check in case we are kicked by the GPU
+                * reset in order to drop the struct_mutex.
+                */
+               if (__i915_request_irq_complete(req))
+                       break;
 
-               if (timer.function) {
-                       del_singleshot_timer_sync(&timer);
-                       destroy_timer_on_stack(&timer);
-               }
+               /* Only spin if we know the GPU is processing this request */
+               if (i915_spin_request(req, state, 2))
+                       break;
        }
-       if (!irq_test_in_progress)
-               engine->irq_put(engine);
-
-       finish_wait(&engine->irq_queue, &wait);
+       remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-out:
+       intel_engine_remove_wait(req->engine, &wait);
+       __set_current_state(TASK_RUNNING);
+complete:
        trace_i915_gem_request_wait_end(req);
 
        if (timeout) {
@@ -1524,7 +1656,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
                return ret;
 
        /* If the GPU hung, we want to keep the requests to find the guilty. */
-       if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+       if (!i915_reset_in_progress(&dev_priv->gpu_error))
                __i915_gem_request_retire__upto(req);
 
        return 0;
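
Stripped of the hangcheck kick, the reset wait-queue and the two optimistic spins,
the new __i915_wait_request() above follows the classic sleep/wake shape: register
a breadcrumb waiter, set the task state before every completion check so a wakeup
cannot be lost, and sleep via io_schedule_timeout(). A simplified, illustrative
sketch (not the exact control flow of the function above):

    intel_wait_init(&wait, req->seqno);
    intel_engine_add_wait(req->engine, &wait);
    for (;;) {
            set_current_state(state);               /* arm before checking */
            if (__i915_request_irq_complete(req))
                    break;
            if (signal_pending_state(state, current)) {
                    ret = -ERESTARTSYS;
                    break;
            }
            timeout_remain = io_schedule_timeout(timeout_remain);
            if (timeout_remain == 0) {
                    ret = -ETIME;
                    break;
            }
    }
    intel_engine_remove_wait(req->engine, &wait);
    __set_current_state(TASK_RUNNING);
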
@@ -1585,7 +1717,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
        else if (obj->last_write_req == req)
                i915_gem_object_retire__write(obj);
 
-       if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+       if (!i915_reset_in_progress(&req->i915->gpu_error))
                __i915_gem_request_retire__upto(req);
 }
 
@@ -1649,6 +1781,13 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
        return &fpriv->rps;
 }
 
+static enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
+{
+       return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
+              ORIGIN_GTT : ORIGIN_CPU;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -1705,9 +1844,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
        if (write_domain != 0)
-               intel_fb_obj_invalidate(obj,
-                                       write_domain == I915_GEM_DOMAIN_GTT ?
-                                       ORIGIN_GTT : ORIGIN_CPU);
+               intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
 unref:
        drm_gem_object_unreference(&obj->base);
@@ -1814,6 +1951,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                else
                        addr = -ENOMEM;
                up_write(&mm->mmap_sem);
+
+               /* This may race, but that's ok, it only gets set */
+               WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
        }
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
@@ -2613,6 +2753,13 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
        }
        i915_gem_retire_requests(dev_priv);
 
+       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+               while (intel_kick_waiters(dev_priv) ||
+                      intel_kick_signalers(dev_priv))
+                       yield();
+       }
+
        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
                intel_ring_init_seqno(engine, seqno);
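
The wrap handling above works because seqno ordering is defined with signed modular
arithmetic, so "passed" stays meaningful across the 32-bit wrap. In essence the
comparison helper looks like this (a sketch, not the literal header definition):

    /* Illustrative: "seq1 is at or after seq2", modulo 2^32. */
    static inline bool example_seqno_passed(u32 seq1, u32 seq2)
    {
            return (s32)(seq1 - seq2) >= 0;
    }
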
@@ -2662,6 +2809,26 @@ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
        return 0;
 }
 
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       dev_priv->gt.active_engines |= intel_engine_flag(engine);
+       if (dev_priv->gt.awake)
+               return;
+
+       intel_runtime_pm_get_noresume(dev_priv);
+       dev_priv->gt.awake = true;
+
+       i915_update_gfx_val(dev_priv);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_busy(dev_priv);
+
+       queue_delayed_work(dev_priv->wq,
+                          &dev_priv->gt.retire_work,
+                          round_jiffies_up_relative(HZ));
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean the
  * request is not being tracked for completion but the work itself is
@@ -2672,7 +2839,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                        bool flush_caches)
 {
        struct intel_engine_cs *engine;
-       struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
        u32 reserved_tail;
@@ -2682,7 +2848,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                return;
 
        engine = request->engine;
-       dev_priv = request->i915;
        ringbuf = request->ringbuf;
 
        /*
@@ -2748,14 +2913,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        }
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
-
-       i915_queue_hangcheck(engine->i915);
-
-       queue_delayed_work(dev_priv->wq,
-                          &dev_priv->mm.retire_work,
-                          round_jiffies_up_relative(HZ));
-       intel_mark_busy(dev_priv);
-
        /* Sanity check that the reserved size was large enough. */
        ret = intel_ring_get_tail(ringbuf) - request_start;
        if (ret < 0)
@@ -2764,6 +2921,8 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                  "Not enough space reserved (%d bytes) "
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);
+
+       i915_gem_mark_busy(engine);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2852,7 +3011,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        kref_init(&req->ref);
        req->i915 = dev_priv;
        req->engine = engine;
-       req->reset_counter = reset_counter;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
 
@@ -2912,8 +3070,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
 
+       /* We are called by the error capture and reset at a random
+        * point in time. In particular, note that neither is crucially
+        * ordered with an interrupt. After a hang, the GPU is dead and we
+        * assume that no more writes can happen (we waited long enough for
+        * all writes that were in transaction to be flushed) - adding an
+        * extra delay for a recent interrupt is pointless. Hence, we do
+        * not need an engine->irq_seqno_barrier() before the seqno reads.
+        */
        list_for_each_entry(request, &engine->request_list, list) {
-               if (i915_gem_request_completed(request, false))
+               if (i915_gem_request_completed(request))
                        continue;
 
                return request;
@@ -3045,7 +3211,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (!i915_gem_request_completed(request, true))
+               if (!i915_gem_request_completed(request))
                        break;
 
                i915_gem_request_retire(request);
@@ -3068,55 +3234,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                i915_gem_object_retire__read(obj, engine->id);
        }
 
-       if (unlikely(engine->trace_irq_req &&
-                    i915_gem_request_completed(engine->trace_irq_req, true))) {
-               engine->irq_put(engine);
-               i915_gem_request_assign(&engine->trace_irq_req, NULL);
-       }
-
        WARN_ON(i915_verify_lists(engine->dev));
 }
 
-bool
-i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
-       bool idle = true;
+
+       lockdep_assert_held(&dev_priv->dev->struct_mutex);
+
+       if (dev_priv->gt.active_engines == 0)
+               return;
+
+       GEM_BUG_ON(!dev_priv->gt.awake);
 
        for_each_engine(engine, dev_priv) {
                i915_gem_retire_requests_ring(engine);
-               idle &= list_empty(&engine->request_list);
-               if (i915.enable_execlists) {
-                       spin_lock_bh(&engine->execlist_lock);
-                       idle &= list_empty(&engine->execlist_queue);
-                       spin_unlock_bh(&engine->execlist_lock);
-               }
+               if (list_empty(&engine->request_list))
+                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
        }
 
-       if (idle)
-               mod_delayed_work(dev_priv->wq,
-                                  &dev_priv->mm.idle_work,
+       if (dev_priv->gt.active_engines == 0)
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.idle_work,
                                   msecs_to_jiffies(100));
-
-       return idle;
 }
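
This is the heart of the patch title: the idle work is armed with
queue_delayed_work(), which is a no-op while the work is already pending, so its
original expiry stands instead of being pushed further out on every retirement.
A small illustrative contrast (the function and parameter names are assumptions,
not driver code):

    #include <linux/workqueue.h>

    /* Illustrative only: why queue_delayed_work() rather than mod_delayed_work(). */
    static void example_arm_idle(struct workqueue_struct *wq,
                                 struct delayed_work *idle_work)
    {
            /* No-op if idle_work is already pending: the first expiry is
             * kept, so the handler cannot be postponed indefinitely. */
            queue_delayed_work(wq, idle_work, msecs_to_jiffies(100));

            /* mod_delayed_work(wq, idle_work, msecs_to_jiffies(100)) would
             * instead restart the timer on every call, deferring the
             * handler for as long as callers keep arriving. */
    }
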
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.retire_work.work);
+               container_of(work, typeof(*dev_priv), gt.retire_work.work);
        struct drm_device *dev = dev_priv->dev;
-       bool idle;
 
        /* Come back later if the device is busy... */
-       idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
-               idle = i915_gem_retire_requests(dev_priv);
+               i915_gem_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
-       if (!idle)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+
+       /* Keep the retire handler running until we are finally idle.
+        * We do not need to do this test under locking as in the worst-case
+        * we queue the retire worker once too often.
+        */
+       if (lockless_dereference(dev_priv->gt.awake))
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.retire_work,
                                   round_jiffies_up_relative(HZ));
 }
 
@@ -3124,25 +3287,55 @@ static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.idle_work.work);
+               container_of(work, typeof(*dev_priv), gt.idle_work.work);
        struct drm_device *dev = dev_priv->dev;
        struct intel_engine_cs *engine;
+       unsigned int stuck_engines;
+       bool rearm_hangcheck;
+
+       if (!READ_ONCE(dev_priv->gt.awake))
+               return;
+
+       if (READ_ONCE(dev_priv->gt.active_engines))
+               return;
+
+       rearm_hangcheck =
+               cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               /* Currently busy, come back later */
+               mod_delayed_work(dev_priv->wq,
+                                &dev_priv->gt.idle_work,
+                                msecs_to_jiffies(50));
+               goto out_rearm;
+       }
+
+       if (dev_priv->gt.active_engines)
+               goto out_unlock;
 
        for_each_engine(engine, dev_priv)
-               if (!list_empty(&engine->request_list))
-                       return;
+               i915_gem_batch_pool_fini(&engine->batch_pool);
 
-       /* we probably should sync with hangcheck here, using cancel_work_sync.
-        * Also locking seems to be fubar here, engine->request_list is protected
-        * by dev->struct_mutex. */
+       GEM_BUG_ON(!dev_priv->gt.awake);
+       dev_priv->gt.awake = false;
+       rearm_hangcheck = false;
 
-       intel_mark_idle(dev_priv);
+       stuck_engines = intel_kick_waiters(dev_priv);
+       if (unlikely(stuck_engines)) {
+               DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
+               dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
+       }
 
-       if (mutex_trylock(&dev->struct_mutex)) {
-               for_each_engine(engine, dev_priv)
-                       i915_gem_batch_pool_fini(&engine->batch_pool);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_idle(dev_priv);
+       intel_runtime_pm_put(dev_priv);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
 
-               mutex_unlock(&dev->struct_mutex);
+out_rearm:
+       if (rearm_hangcheck) {
+               GEM_BUG_ON(!dev_priv->gt.awake);
+               i915_queue_hangcheck(dev_priv);
        }
 }
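
The handler above never blocks inside the worker: it does a cheap lock-free check,
trylocks struct_mutex, and if contended simply reschedules itself a little later;
once under the lock it re-checks that the GT is still idle before parking it. A
self-contained sketch of that shape (struct example_dev and its fields are
illustrative assumptions, not driver structures):

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct example_dev {
            struct workqueue_struct *wq;
            struct delayed_work idle_work;
            struct mutex lock;
            unsigned int active;
            bool awake;
    };

    /* Illustrative trylock-or-retry idle handler. */
    static void example_idle_handler(struct work_struct *work)
    {
            struct example_dev *dev =
                    container_of(work, typeof(*dev), idle_work.work);

            if (READ_ONCE(dev->active))             /* cheap unlocked check */
                    return;

            if (!mutex_trylock(&dev->lock)) {
                    /* Busy: try again shortly rather than blocking here. */
                    mod_delayed_work(dev->wq, &dev->idle_work,
                                     msecs_to_jiffies(50));
                    return;
            }

            if (dev->active == 0)                   /* re-check under the lock */
                    dev->awake = false;             /* ... park the device ... */

            mutex_unlock(&dev->lock);
    }
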
 
@@ -3167,7 +3360,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                if (req == NULL)
                        continue;
 
-               if (i915_gem_request_completed(req, true))
+               if (i915_gem_request_completed(req))
                        i915_gem_object_retire__read(obj, i);
        }
 
@@ -3275,7 +3468,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (to == from)
                return 0;
 
-       if (i915_gem_request_completed(from_req, true))
+       if (i915_gem_request_completed(from_req))
                return 0;
 
        if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
@@ -3502,26 +3695,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
        return __i915_vma_unbind(vma, false);
 }
 
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
 
-       /* Flush everything onto the inactive list. */
-       for_each_engine(engine, dev_priv) {
-               if (!i915.enable_execlists) {
-                       struct drm_i915_gem_request *req;
-
-                       req = i915_gem_request_alloc(engine, NULL);
-                       if (IS_ERR(req))
-                               return PTR_ERR(req);
+       lockdep_assert_held(&dev_priv->dev->struct_mutex);
 
-                       ret = i915_switch_context(req);
-                       i915_add_request_no_flush(req);
-                       if (ret)
-                               return ret;
-               }
+       for_each_engine(engine, dev_priv) {
+               if (engine->last_context == NULL)
+                       continue;
 
                ret = intel_engine_idle(engine);
                if (ret)
@@ -4014,9 +4197,7 @@ out:
         * object is now coherent at its new cache level (with respect
         * to the access domain).
         */
-       if (obj->cache_dirty &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
-           cpu_write_needs_clflush(obj)) {
+       if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
                if (i915_gem_clflush_object(obj, true))
                        i915_gem_chipset_flush(to_i915(obj->base.dev));
        }
@@ -4287,7 +4468,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
        ret = __i915_wait_request(target, true, NULL, NULL);
        if (ret == 0)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+               queue_delayed_work(dev_priv->wq, &dev_priv->gt.retire_work, 0);
 
        i915_gem_request_unreference(target);
 
@@ -4794,7 +4975,7 @@ i915_gem_suspend(struct drm_device *dev)
        int ret = 0;
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+       ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                goto err;
 
@@ -4805,13 +4986,13 @@ i915_gem_suspend(struct drm_device *dev)
        mutex_unlock(&dev->struct_mutex);
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-       flush_delayed_work(&dev_priv->mm.idle_work);
+       cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+       flush_delayed_work(&dev_priv->gt.idle_work);
 
        /* Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
         */
-       WARN_ON(dev_priv->mm.busy);
+       WARN_ON(dev_priv->gt.awake);
 
        return 0;
 
@@ -4980,12 +5161,6 @@ i915_gem_init_hw(struct drm_device *dev)
        if (ret)
                goto out;
 
-       /*
-        * Increment the next seqno by 0x100 so we have a visible break
-        * on re-initialisation
-        */
-       ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
-
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return ret;
@@ -5119,22 +5294,15 @@ i915_gem_load_init(struct drm_device *dev)
                init_engine_lists(&dev_priv->engine[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
-       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
-       INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
                          i915_gem_idle_work_handler);
+       init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       /*
-        * Set initial sequence number for requests.
-        * Using this number allows the wraparound to happen early,
-        * catching any obvious problems.
-        */
-       dev_priv->next_seqno = ((u32)~0 - 0x1100);
-       dev_priv->last_seqno = ((u32)~0 - 0x1101);
-
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5370,7 +5538,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
        struct page *page;
 
        /* Only default objects have per-page dirty tracking */
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return NULL;
 
        page = i915_gem_object_get_page(obj, n);