drm/i915: Move the get/put irq locking into the caller
[cascardo/linux.git] / drivers/gpu/drm/i915/intel_ringbuffer.c
index 9121646..994f761 100644
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
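
The 200-byte figure above is headroom, not a hard limit: each intel_ring_emit() below writes one 4-byte dword, so a rough tally (dword counts assumed for illustration) shows why 200 bytes comfortably covers a flush, a context switch and a batch start.

/* Back-of-the-envelope check (packet sizes are assumptions):
 *   flush:        6 dwords, e.g. one GFX_OP_PIPE_CONTROL(6) packet
 *   set-context: ~8 dwords, MI_SET_CONTEXT plus surrounding flushes
 *   batch start: ~4 dwords, MI_BATCH_BUFFER_START and padding
 * (6 + 8 + 4) * 4 = 72 bytes of payload, leaving slack for workarounds
 * and alignment NOOPs within the 200-byte reservation. */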
 int __intel_ring_space(int head, int tail, int size)
 {
        int space = head - tail;
@@ -53,25 +58,19 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
                                            ringbuf->tail, ringbuf->size);
 }
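
__intel_ring_space() above is the classic circular-buffer computation: free space is the gap from tail up to head, modulo the buffer size. A minimal standalone sketch (the small reservation the driver subtracts at the end is elided here):

/* Minimal sketch: head/tail are byte offsets into a 'size'-byte ring. */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;	/* bytes before tail would hit head */
	if (space <= 0)
		space += size;		/* tail is ahead of head: wrapped */
	return space;
}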
 
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
+bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
-       intel_ring_update_space(ringbuf);
-       return ringbuf->space;
+       struct drm_i915_private *dev_priv = engine->i915;
+       return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 
-bool intel_ring_stopped(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
-}
-
-static void __intel_ring_advance(struct intel_engine_cs *ring)
-{
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
-       if (intel_ring_stopped(ring))
+       if (intel_engine_stopped(engine))
                return;
-       ring->write_tail(ring, ringbuf->tail);
+       engine->write_tail(engine, ringbuf->tail);
 }
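
The tail &= size - 1 in __intel_ring_advance() is a cheap modulo that only works because ring sizes are powers of two; a worked example:

/* Worked example: with a power-of-two size, masking wraps in one AND. */
static unsigned int wrap_tail(unsigned int tail, unsigned int size)
{
	/* e.g. tail = 4100, size = 4096: 4100 & 4095 == 4,
	 * i.e. the tail lands back at the start of the buffer. */
	return tail & (size - 1);
}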
 
 static int
@@ -79,7 +78,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        u32 cmd;
        int ret;
 
@@ -94,9 +93,9 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, cmd);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, cmd);
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
 
        return 0;
 }
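
Every emitter in this file follows the same shape as gen2_render_ring_flush() above: reserve a fixed number of dwords, write exactly that many, then advance the tail. A hedged template using the same helpers:

/* Template (sketch): reserve N dwords, emit N, publish the new tail. */
static int emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 2);	/* may wait for ring space */
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_NOOP);	/* keep the emission even */
	intel_ring_advance(engine);		/* make the tail visible */

	return 0;
}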
@@ -106,8 +105,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_device *dev = ring->dev;
+       struct intel_engine_cs *engine = req->engine;
        u32 cmd;
        int ret;
 
@@ -146,16 +144,16 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                cmd |= MI_EXE_FLUSH;
 
        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-           (IS_G4X(dev) || IS_GEN5(dev)))
+           (IS_G4X(req->i915) || IS_GEN5(req->i915)))
                cmd |= MI_INVALIDATE_ISP;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, cmd);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, cmd);
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -200,34 +198,34 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       struct intel_engine_cs *engine = req->engine;
+       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-       intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+       intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
-       intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-       intel_ring_emit(ring, 0); /* low dword */
-       intel_ring_emit(ring, 0); /* high dword */
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+       intel_ring_emit(engine, 0); /* low dword */
+       intel_ring_emit(engine, 0); /* high dword */
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
 
        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-       intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-       intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-       intel_ring_emit(ring, 0);
-       intel_ring_emit(ring, 0);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+       intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
+       intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+       intel_ring_emit(engine, 0);
+       intel_ring_emit(engine, 0);
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -236,9 +234,9 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -276,11 +274,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-       intel_ring_emit(ring, flags);
-       intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+       intel_ring_emit(engine, flags);
+       intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(engine, 0);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -288,19 +286,19 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-       intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+       intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
                              PIPE_CONTROL_STALL_AT_SCOREBOARD);
-       intel_ring_emit(ring, 0);
-       intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, 0);
+       intel_ring_emit(engine, 0);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -309,9 +307,9 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /*
@@ -360,11 +358,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-       intel_ring_emit(ring, flags);
-       intel_ring_emit(ring, scratch_addr);
-       intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+       intel_ring_emit(engine, flags);
+       intel_ring_emit(engine, scratch_addr);
+       intel_ring_emit(engine, 0);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -373,20 +371,20 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
                       u32 flags, u32 scratch_addr)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 6);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-       intel_ring_emit(ring, flags);
-       intel_ring_emit(ring, scratch_addr);
-       intel_ring_emit(ring, 0);
-       intel_ring_emit(ring, 0);
-       intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(engine, flags);
+       intel_ring_emit(engine, scratch_addr);
+       intel_ring_emit(engine, 0);
+       intel_ring_emit(engine, 0);
+       intel_ring_emit(engine, 0);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -396,7 +394,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        flags |= PIPE_CONTROL_CS_STALL;
@@ -429,51 +427,50 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
        return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *ring,
+static void ring_write_tail(struct intel_engine_cs *engine,
                            u32 value)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       I915_WRITE_TAIL(ring, value);
+       struct drm_i915_private *dev_priv = engine->i915;
+       I915_WRITE_TAIL(engine, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;
 
-       if (INTEL_INFO(ring->dev)->gen >= 8)
-               acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
-                                        RING_ACTHD_UDW(ring->mmio_base));
-       else if (INTEL_INFO(ring->dev)->gen >= 4)
-               acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+       if (INTEL_GEN(dev_priv) >= 8)
+               acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+                                        RING_ACTHD_UDW(engine->mmio_base));
+       else if (INTEL_GEN(dev_priv) >= 4)
+               acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);
 
        return acthd;
 }
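
I915_READ64_2x32() exists because the two 32-bit halves of ACTHD cannot be read atomically; the usual defence (the exact macro body is assumed here) is to re-read the upper half until it is stable across the lower-half read:

/* Sketch of a tear-free 64-bit read from two 32-bit registers;
 * read32() is a hypothetical MMIO accessor. */
static u64 read64_2x32(u32 lower_reg, u32 upper_reg)
{
	u32 upper, lower, old_upper;

	upper = read32(upper_reg);
	do {
		old_upper = upper;
		lower = read32(lower_reg);
		upper = read32(upper_reg);	/* re-check for a carry */
	} while (upper != old_upper);

	return (u64)upper << 32 | lower;
}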
 
-static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u32 addr;
 
        addr = dev_priv->status_page_dmah->busaddr;
-       if (INTEL_INFO(ring->dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
 }
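
The (busaddr >> 28) & 0xf0 fold above squeezes bits 32-35 of the DMA address into bits 4-7 of the register value, since the low bits of HWS_PGA carry control flags rather than address; a worked example:

/* Worked example of the gen4+ packing above, with a hypothetical
 * 36-bit bus address 0x3_c000_1000. */
static u32 pack_hws_pga(u64 busaddr)
{
	u32 addr = (u32)busaddr;	/* low 32 bits: page address */

	addr |= (busaddr >> 28) & 0xf0;	/* bits 32..35 -> bits 4..7 */
	return addr;			/* 0x3c0001000 -> 0xc0001030 */
}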
 
-static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        i915_reg_t mmio;
 
        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
-       if (IS_GEN7(dev)) {
-               switch (ring->id) {
+       if (IS_GEN7(dev_priv)) {
+               switch (engine->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
@@ -492,14 +489,14 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
-       } else if (IS_GEN6(ring->dev)) {
-               mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+       } else if (IS_GEN6(dev_priv)) {
+               mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
                /* XXX: gen8 returns to sanity */
-               mmio = RING_HWS_PGA(ring->mmio_base);
+               mmio = RING_HWS_PGA(engine->mmio_base);
        }
 
-       I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+       I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
        POSTING_READ(mmio);
 
        /*
@@ -509,129 +506,141 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
         * arises: do we still need this and if so how should we go about
         * invalidating the TLB?
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-               i915_reg_t reg = RING_INSTPM(ring->mmio_base);
+       if (IS_GEN(dev_priv, 6, 7)) {
+               i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
                /* ring should be idle before issuing a sync flush*/
-               WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+               WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
                                              INSTPM_SYNC_FLUSH));
-               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv,
+                                           reg, INSTPM_SYNC_FLUSH, 0,
+                                           1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
-                                 ring->name);
+                                 engine->name);
        }
 }
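
intel_wait_for_register() replaces the open-coded wait_for() polls in this hunk; its contract is a masked poll with a timeout, roughly as below (now_ms() and read32() are hypothetical helpers):

/* Sketch: succeed once (read(reg) & mask) == value, else -ETIMEDOUT. */
static int wait_for_register(u32 reg, u32 mask, u32 value,
			     unsigned int timeout_ms)
{
	u64 deadline = now_ms() + timeout_ms;

	do {
		if ((read32(reg) & mask) == value)
			return 0;
		cpu_relax();
	} while (now_ms() < deadline);

	return -ETIMEDOUT;
}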
 
-static bool stop_ring(struct intel_engine_cs *ring)
+static bool stop_ring(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       if (!IS_GEN2(ring->dev)) {
-               I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-               if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-                       DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+       if (!IS_GEN2(dev_priv)) {
+               I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+               if (intel_wait_for_register(dev_priv,
+                                           RING_MI_MODE(engine->mmio_base),
+                                           MODE_IDLE,
+                                           MODE_IDLE,
+                                           1000)) {
+                       DRM_ERROR("%s : timed out trying to stop ring\n",
+                                 engine->name);
                        /* Sometimes we observe that the idle flag is not
                         * set even though the ring is empty. So double
                         * check before giving up.
                         */
-                       if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+                       if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
                                return false;
                }
        }
 
-       I915_WRITE_CTL(ring, 0);
-       I915_WRITE_HEAD(ring, 0);
-       ring->write_tail(ring, 0);
+       I915_WRITE_CTL(engine, 0);
+       I915_WRITE_HEAD(engine, 0);
+       engine->write_tail(engine, 0);
 
-       if (!IS_GEN2(ring->dev)) {
-               (void)I915_READ_CTL(ring);
-               I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       if (!IS_GEN2(dev_priv)) {
+               (void)I915_READ_CTL(engine);
+               I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
        }
 
-       return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+       return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 }
 
-static int init_ring_common(struct intel_engine_cs *ring)
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ringbuffer *ringbuf = ring->buffer;
+       memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+static int init_ring_common(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct intel_ringbuffer *ringbuf = engine->buffer;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        int ret = 0;
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       if (!stop_ring(ring)) {
+       if (!stop_ring(engine)) {
                /* G45 ring initialization often fails to reset head to zero */
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
-                             ring->name,
-                             I915_READ_CTL(ring),
-                             I915_READ_HEAD(ring),
-                             I915_READ_TAIL(ring),
-                             I915_READ_START(ring));
+                             engine->name,
+                             I915_READ_CTL(engine),
+                             I915_READ_HEAD(engine),
+                             I915_READ_TAIL(engine),
+                             I915_READ_START(engine));
 
-               if (!stop_ring(ring)) {
+               if (!stop_ring(engine)) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
-                                 ring->name,
-                                 I915_READ_CTL(ring),
-                                 I915_READ_HEAD(ring),
-                                 I915_READ_TAIL(ring),
-                                 I915_READ_START(ring));
+                                 engine->name,
+                                 I915_READ_CTL(engine),
+                                 I915_READ_HEAD(engine),
+                                 I915_READ_TAIL(engine),
+                                 I915_READ_START(engine));
                        ret = -EIO;
                        goto out;
                }
        }
 
-       if (I915_NEED_GFX_HWS(dev))
-               intel_ring_setup_status_page(ring);
+       if (I915_NEED_GFX_HWS(dev_priv))
+               intel_ring_setup_status_page(engine);
        else
-               ring_setup_phys_status_page(ring);
+               ring_setup_phys_status_page(engine);
 
        /* Enforce ordering by reading HEAD register back */
-       I915_READ_HEAD(ring);
+       I915_READ_HEAD(engine);
 
        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
-       I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+       I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
 
        /* WaClearRingBufHeadRegAtInit:ctg,elk */
-       if (I915_READ_HEAD(ring))
+       if (I915_READ_HEAD(engine))
                DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
-                         ring->name, I915_READ_HEAD(ring));
-       I915_WRITE_HEAD(ring, 0);
-       (void)I915_READ_HEAD(ring);
+                         engine->name, I915_READ_HEAD(engine));
+       I915_WRITE_HEAD(engine, 0);
+       (void)I915_READ_HEAD(engine);
 
-       I915_WRITE_CTL(ring,
+       I915_WRITE_CTL(engine,
                        ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
-       if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-                    I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
-                    (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+       if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
+                    I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
+                    (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
-                         ring->name,
-                         I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
-                         I915_READ_HEAD(ring), I915_READ_TAIL(ring),
-                         I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+                         engine->name,
+                         I915_READ_CTL(engine),
+                         I915_READ_CTL(engine) & RING_VALID,
+                         I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+                         I915_READ_START(engine),
+                         (unsigned long)i915_gem_obj_ggtt_offset(obj));
                ret = -EIO;
                goto out;
        }
 
        ringbuf->last_retired_head = -1;
-       ringbuf->head = I915_READ_HEAD(ring);
-       ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+       ringbuf->head = I915_READ_HEAD(engine);
+       ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
        intel_ring_update_space(ringbuf);
 
-       memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+       intel_engine_init_hangcheck(engine);
 
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -639,76 +648,58 @@ out:
        return ret;
 }
 
-void
-intel_fini_pipe_control(struct intel_engine_cs *ring)
+void intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-
-       if (ring->scratch.obj == NULL)
+       if (engine->scratch.obj == NULL)
                return;
 
-       if (INTEL_INFO(dev)->gen >= 5) {
-               kunmap(sg_page(ring->scratch.obj->pages->sgl));
-               i915_gem_object_ggtt_unpin(ring->scratch.obj);
-       }
-
-       drm_gem_object_unreference(&ring->scratch.obj->base);
-       ring->scratch.obj = NULL;
+       i915_gem_object_ggtt_unpin(engine->scratch.obj);
+       drm_gem_object_unreference(&engine->scratch.obj->base);
+       engine->scratch.obj = NULL;
 }
 
-int
-intel_init_pipe_control(struct intel_engine_cs *ring)
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
 {
+       struct drm_i915_gem_object *obj;
        int ret;
 
-       WARN_ON(ring->scratch.obj);
+       WARN_ON(engine->scratch.obj);
 
-       ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
-       if (ring->scratch.obj == NULL) {
-               DRM_ERROR("Failed to allocate seqno page\n");
-               ret = -ENOMEM;
+       obj = i915_gem_object_create_stolen(engine->i915->dev, size);
+       if (!obj)
+               obj = i915_gem_object_create(engine->i915->dev, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               ret = PTR_ERR(obj);
                goto err;
        }
 
-       ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
-       if (ret)
-               goto err_unref;
-
-       ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
        if (ret)
                goto err_unref;
 
-       ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
-       ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
-       if (ring->scratch.cpu_page == NULL) {
-               ret = -ENOMEM;
-               goto err_unpin;
-       }
-
+       engine->scratch.obj = obj;
+       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
-                        ring->name, ring->scratch.gtt_offset);
+                        engine->name, engine->scratch.gtt_offset);
        return 0;
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(ring->scratch.obj);
 err_unref:
-       drm_gem_object_unreference(&ring->scratch.obj->base);
+       drm_gem_object_unreference(&engine->scratch.obj->base);
 err:
        return ret;
 }
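
Note the two error conventions meeting in intel_init_pipe_control() above: the stolen-memory allocator signals failure with NULL (so the code falls through to a shmem-backed object), while i915_gem_object_create() returns an ERR_PTR; hence the back-to-back !obj and IS_ERR(obj) tests. Condensed:

/* Sketch of the fallback above (allocator names as in the hunk):
 * stolen returns NULL on failure, create returns an ERR_PTR. */
obj = i915_gem_object_create_stolen(dev, size);
if (!obj)
	obj = i915_gem_object_create(dev, size);
if (IS_ERR(obj))
	return PTR_ERR(obj);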
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *engine = req->engine;
+       struct i915_workarounds *w = &req->i915->workarounds;
        int ret, i;
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_workarounds *w = &dev_priv->workarounds;
 
        if (w->count == 0)
                return 0;
 
-       ring->gpu_caches_dirty = true;
+       engine->gpu_caches_dirty = true;
        ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -717,16 +708,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
        for (i = 0; i < w->count; i++) {
-               intel_ring_emit_reg(ring, w->reg[i].addr);
-               intel_ring_emit(ring, w->reg[i].value);
+               intel_ring_emit_reg(engine, w->reg[i].addr);
+               intel_ring_emit(engine, w->reg[i].value);
        }
-       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_emit(engine, MI_NOOP);
 
-       intel_ring_advance(ring);
+       intel_ring_advance(engine);
 
-       ring->gpu_caches_dirty = true;
+       engine->gpu_caches_dirty = true;
        ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
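
The loop above emits a single MI_LOAD_REGISTER_IMM packet: one header whose length field covers all the (offset, value) pairs, then the pairs themselves, padded with MI_NOOP to an even dword count. Assumed dword stream for two workaround registers:

/* MI_LOAD_REGISTER_IMM with w->count == 2 (layout as emitted above):
 *   [0] MI_LOAD_REGISTER_IMM(2)    header
 *   [1] offset of reg A            [2] value for reg A
 *   [3] offset of reg B            [4] value for reg B
 *   [5] MI_NOOP                    pad to an even number of dwords
 */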
@@ -789,26 +780,26 @@ static int wa_add(struct drm_i915_private *dev_priv,
 
 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
-static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+                                i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct i915_workarounds *wa = &dev_priv->workarounds;
-       const uint32_t index = wa->hw_whitelist_count[ring->id];
+       const uint32_t index = wa->hw_whitelist_count[engine->id];
 
        if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
                return -EINVAL;
 
-       WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+       WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
                 i915_mmio_reg_offset(reg));
-       wa->hw_whitelist_count[ring->id]++;
+       wa->hw_whitelist_count[engine->id]++;
 
        return 0;
 }
 
-static int gen8_init_workarounds(struct intel_engine_cs *ring)
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
@@ -857,13 +848,12 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int bdw_init_workarounds(struct intel_engine_cs *ring)
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen8_init_workarounds(ring);
+       ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;
 
@@ -881,18 +871,17 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-                         (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+                         (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
        return 0;
 }
 
-static int chv_init_workarounds(struct intel_engine_cs *ring)
+static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen8_init_workarounds(ring);
+       ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;
 
@@ -905,38 +894,41 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int gen9_init_workarounds(struct intel_engine_cs *ring)
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
-       /* WaEnableLbsSlaRetryTimerDecrement:skl */
+       /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
-       /* WaDisableKillLogic:bxt,skl */
+       /* WaDisableKillLogic:bxt,skl,kbl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);
 
-       /* WaDisablePartialInstShootdown:skl,bxt */
+       /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+       /* WaDisablePartialInstShootdown:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+                         FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
-       /* Syncing dependencies between camera and graphics:skl,bxt */
+       /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
        /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                                  GEN9_DG_MIRROR_FIX_ENABLE);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
                /*
@@ -946,62 +938,88 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
                 */
        }
 
-       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
-               WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
-                                 GEN9_ENABLE_YV12_BUGFIX);
+       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+                         GEN9_ENABLE_YV12_BUGFIX |
+                         GEN9_ENABLE_GPGPU_PREEMPTION);
 
-       /* Wa4x4STCOptimizationDisable:skl,bxt */
-       /* WaDisablePartialResolveInVc:skl,bxt */
+       /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+       /* WaDisablePartialResolveInVc:skl,bxt,kbl */
        WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                                         GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
 
-       /* WaCcsTlbPrefetchDisable:skl,bxt */
+       /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);
 
        /* WaDisableMaskBasedCammingInRCC:skl,bxt */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                  PIXEL_MASK_CAMMING_DISABLE);
 
-       /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
-       tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
-           IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
-               tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
-       WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+       /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+                         HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+       /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+        * both tied to WaForceContextSaveRestoreNonCoherent
+        * in some hsds for skl. We keep the tie for all gen9. The
+        * documentation is a bit hazy and so we want to get common behaviour,
+        * even though there is no clear evidence we would need both on kbl/bxt.
+        * This area has been a source of system hangs, so we play it safe
+        * and mimic the skl regardless of what bspec says.
+        *
+        * Use Force Non-Coherent whenever executing a 3D context. This
+        * is a workaround for a possible hang in the unlikely event
+        * a TLB invalidation occurs during a PSD flush.
+        */
 
-       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-       if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+       /* WaForceEnableNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_NON_COHERENT);
+
+       /* WaDisableHDCInvalidation:skl,bxt,kbl */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+                  BDW_DISABLE_HDC_INVALIDATION);
+
+       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+       if (IS_SKYLAKE(dev_priv) ||
+           IS_KABYLAKE(dev_priv) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
-       /* WaDisableSTUnitPowerOptimization:skl,bxt */
+       /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
-       /* WaOCLCoherentLineFlush:skl,bxt */
+       /* WaOCLCoherentLineFlush:skl,bxt,kbl */
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
-       ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+       /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+       ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
        if (ret)
                return ret;
 
-       /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
-       ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+       if (ret)
+               return ret;
+
+       /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
        if (ret)
                return ret;
 
        return 0;
 }
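
The WA_SET_BIT_MASKED()/_MASKED_BIT_ENABLE() helpers used throughout these workaround functions target self-masking registers: the written dword carries a bit-select mask in its top half, so no read-modify-write is needed. A sketch of the assumed encoding:

/* Assumed masked-register encoding: the high 16 bits select which bits
 * the write may change, the low 16 bits supply their new values. */
#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))	/* set bit(s) b */
#define MASKED_BIT_DISABLE(b)	((b) << 16)		/* clear bit(s) b */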
 
-static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;
 
@@ -1040,13 +1058,12 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
        return 0;
 }
 
-static int skl_init_workarounds(struct intel_engine_cs *ring)
+static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen9_init_workarounds(ring);
+       ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;
 
@@ -1055,12 +1072,12 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
         * until D0 which is the default case so this is equivalent to
         * !WaDisablePerCtxtPreemptionGranularityControl:skl
         */
-       if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
                I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
        }
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1069,75 +1086,61 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
         * involving this register should also be added to WA batch as required.
         */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);
 
        /* WaEnableGapsTsvCreditFix:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
                I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                           GEN9_GAPS_TSV_CREDIT_DISABLE));
        }
 
        /* WaDisablePowerCompilerClockGating:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       /* This is tied to WaForceContextSaveRestoreNonCoherent */
-       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
-               /*
-                *Use Force Non-Coherent whenever executing a 3D context. This
-                * is a workaround for a possible hang in the unlikely event
-                * a TLB invalidation occurs during a PSD flush.
-                */
-               /* WaForceEnableNonCoherent:skl */
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FORCE_NON_COHERENT);
-
-               /* WaDisableHDCInvalidation:skl */
-               I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                          BDW_DISABLE_HDC_INVALIDATION);
-       }
-
        /* WaBarrierPerformanceFixDisable:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE |
                                  HDC_BARRIER_PERFORMANCE_DISABLE);
 
        /* WaDisableSbeCacheDispatchPortSharing:skl */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+       /* WaDisableGafsUnitClkGating:skl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
        /* WaDisableLSQCROPERFforOCL:skl */
-       ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
                return ret;
 
-       return skl_tune_iz_hashing(ring);
+       return skl_tune_iz_hashing(engine);
 }
 
-static int bxt_init_workarounds(struct intel_engine_cs *ring)
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       ret = gen9_init_workarounds(ring);
+       ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;
 
        /* WaStoreMultiplePTEenable:bxt */
        /* This is a requirement according to Hardware specification */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
        /* WaSetClckGatingDisableMedia:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
        }
@@ -1146,8 +1149,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          STALL_DOP_GATING_DISABLE);
 
+       /* WaDisablePooledEuLoadBalancingFix:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+               WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+                                 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+       }
+
        /* WaDisableSbeCacheDispatchPortSharing:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1157,54 +1166,117 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
        /* WaDisableLSQCROPERFforOCL:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+               ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
                if (ret)
                        return ret;
 
-               ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+               ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
                if (ret)
                        return ret;
        }
 
+       /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+                                          L3_HIGH_PRIO_CREDITS(2));
+
+       /* WaInsertDummyPushConstPs:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
        return 0;
 }
 
-int init_workarounds_ring(struct intel_engine_cs *ring)
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaEnableGapsTsvCreditFix:kbl */
+       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+       /* WaDisableDynamicCreditSharing:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT(GAMT_CHKN_BIT_REG,
+                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+       /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE);
+
+       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+        * involving this register should also be added to WA batch as required.
+        */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+               /* WaDisableLSQCROPERFforOCL:kbl */
+               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+                          GEN8_LQSC_RO_PERF_DIS);
+
+       /* WaInsertDummyPushConstPs:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       /* WaDisableGafsUnitClkGating:kbl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableSbeCacheDispatchPortSharing:kbl */
+       WA_SET_BIT_MASKED(
+               GEN7_HALF_SLICE_CHICKEN1,
+               GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+       /* WaDisableLSQCROPERFforOCL:kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+       if (ret)
+               return ret;
 
-       WARN_ON(ring->id != RCS);
+       return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       WARN_ON(engine->id != RCS);
 
        dev_priv->workarounds.count = 0;
        dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-       if (IS_BROADWELL(dev))
-               return bdw_init_workarounds(ring);
+       if (IS_BROADWELL(dev_priv))
+               return bdw_init_workarounds(engine);
+
+       if (IS_CHERRYVIEW(dev_priv))
+               return chv_init_workarounds(engine);
 
-       if (IS_CHERRYVIEW(dev))
-               return chv_init_workarounds(ring);
+       if (IS_SKYLAKE(dev_priv))
+               return skl_init_workarounds(engine);
 
-       if (IS_SKYLAKE(dev))
-               return skl_init_workarounds(ring);
+       if (IS_BROXTON(dev_priv))
+               return bxt_init_workarounds(engine);
 
-       if (IS_BROXTON(dev))
-               return bxt_init_workarounds(ring);
+       if (IS_KABYLAKE(dev_priv))
+               return kbl_init_workarounds(engine);
 
        return 0;
 }
 
-static int init_render_ring(struct intel_engine_cs *ring)
+static int init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = init_ring_common(ring);
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret = init_ring_common(engine);
        if (ret)
                return ret;
 
        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-       if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+       if (IS_GEN(dev_priv, 4, 6))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
        /* We need to disable the AsyncFlip performance optimisations in order
@@ -1213,22 +1285,22 @@ static int init_render_ring(struct intel_engine_cs *ring)
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
        /* WaEnableFlushTlbInvalidationMode:snb */
-       if (INTEL_INFO(dev)->gen == 6)
+       if (IS_GEN6(dev_priv))
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
        /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-       if (IS_GEN7(dev))
+       if (IS_GEN7(dev_priv))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev_priv)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
@@ -1238,19 +1310,18 @@ static int init_render_ring(struct intel_engine_cs *ring)
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }
 
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       if (HAS_L3_DPF(dev))
-               I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+       if (HAS_L3_DPF(dev_priv))
+               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
 
-       return init_workarounds_ring(ring);
+       return init_workarounds_ring(engine);
 }
 
-static void render_ring_cleanup(struct intel_engine_cs *ring)
+static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (dev_priv->semaphore_obj) {
                i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1258,20 +1329,20 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
                dev_priv->semaphore_obj = NULL;
        }
 
-       intel_fini_pipe_control(ring);
+       intel_fini_pipe_control(engine);
 }
 
 static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                           unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-       struct intel_engine_cs *signaller = signaller_req->ring;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *signaller = signaller_req->engine;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
-       int i, ret, num_rings;
+       enum intel_engine_id id;
+       int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1279,23 +1350,21 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
        if (ret)
                return ret;
 
-       for_each_ring(waiter, dev_priv, i) {
-               u32 seqno;
-               u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+       for_each_engine_id(waiter, dev_priv, id) {
+               u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
-                                          PIPE_CONTROL_FLUSH_ENABLE);
+                                          PIPE_CONTROL_CS_STALL);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-                                          MI_SEMAPHORE_TARGET(waiter->id));
+                                          MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
        }
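
The MBOX_UPDATE_DWORDS arithmetic at the top of gen8_rcs_signal() sizes the reservation before the waiter loop runs; for example:

/* Worked example, assuming a 4-engine part:
 *   num_dwords += (4 - 1) * 8;   -> 24 extra dwords reserved,
 * one 8-dword PIPE_CONTROL + MI_SEMAPHORE_SIGNAL block per other engine. */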
 
@@ -1306,13 +1375,13 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                           unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-       struct intel_engine_cs *signaller = signaller_req->ring;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *signaller = signaller_req->engine;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
-       int i, ret, num_rings;
+       enum intel_engine_id id;
+       int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1320,21 +1389,19 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
        if (ret)
                return ret;
 
-       for_each_ring(waiter, dev_priv, i) {
-               u32 seqno;
-               u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+       for_each_engine_id(waiter, dev_priv, id) {
+               u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-                                          MI_SEMAPHORE_TARGET(waiter->id));
+                                          MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
        }
 
@@ -1344,14 +1411,14 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                       unsigned int num_dwords)
 {
-       struct intel_engine_cs *signaller = signaller_req->ring;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *signaller = signaller_req->engine;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *useless;
-       int i, ret, num_rings;
+       enum intel_engine_id id;
+       int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1359,15 +1426,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
        if (ret)
                return ret;
 
-       for_each_ring(useless, dev_priv, i) {
-               i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+       for_each_engine_id(useless, dev_priv, id) {
+               i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
-                       u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, seqno);
+                       intel_ring_emit(signaller, signaller_req->seqno);
                }
        }
 
@@ -1389,30 +1454,59 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
-       if (ring->semaphore.signal)
-               ret = ring->semaphore.signal(req, 4);
+       if (engine->semaphore.signal)
+               ret = engine->semaphore.signal(req, 4);
        else
                ret = intel_ring_begin(req, 4);
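        /* NB: semaphore.signal() performs the intel_ring_begin() itself,
         * sized for these four dwords plus the mbox updates above. */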
 
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-       intel_ring_emit(ring, MI_USER_INTERRUPT);
-       __intel_ring_advance(ring);
+       intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(engine,
+                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(engine, req->seqno);
+       intel_ring_emit(engine, MI_USER_INTERRUPT);
+       __intel_ring_advance(engine);
 
        return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+       struct intel_engine_cs *engine = req->engine;
+       int ret;
+
+       if (engine->semaphore.signal)
+               ret = engine->semaphore.signal(req, 8);
+       else
+               ret = intel_ring_begin(req, 8);
+       if (ret)
+               return ret;
+
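+       /* Post-sync operation: a CS-stalled quadword write of the seqno to
+        * the HWS, followed by the user interrupt. */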
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                PIPE_CONTROL_CS_STALL |
+                                PIPE_CONTROL_QW_WRITE));
+       intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+       intel_ring_emit(engine, 0);
+       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       /* The quadword write thrashes one extra dword of the HWS beyond
+        * the seqno; emit a zero as its upper half. */
+       intel_ring_emit(engine, 0);
+       intel_ring_emit(engine, MI_USER_INTERRUPT);
+       intel_ring_emit(engine, MI_NOOP);
+       __intel_ring_advance(engine);
+
+       return 0;
+}
+
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
                                              u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        return dev_priv->last_seqno < seqno;
 }
 
@@ -1429,8 +1523,10 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
               struct intel_engine_cs *signaller,
               u32 seqno)
 {
-       struct intel_engine_cs *waiter = waiter_req->ring;
-       struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+       struct intel_engine_cs *waiter = waiter_req->engine;
+       struct drm_i915_private *dev_priv = waiter_req->i915;
+       u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+       struct i915_hw_ppgtt *ppgtt;
        int ret;
 
        ret = intel_ring_begin(waiter_req, 4);
@@ -1439,14 +1535,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 
        intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
                                MI_SEMAPHORE_GLOBAL_GTT |
-                               MI_SEMAPHORE_POLL |
                                MI_SEMAPHORE_SAD_GTE_SDD);
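        /* SAD_GTE_SDD: the wait completes once the semaphore dword is
         * greater than or equal to the signaller's seqno. */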
        intel_ring_emit(waiter, seqno);
-       intel_ring_emit(waiter,
-                       lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
-       intel_ring_emit(waiter,
-                       upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+       intel_ring_emit(waiter, lower_32_bits(offset));
+       intel_ring_emit(waiter, upper_32_bits(offset));
        intel_ring_advance(waiter);
+
+       /* When the !RCS engines idle waiting upon a semaphore, they lose their
+        * pagetables and we must reload them before executing the batch.
+        * We do this on the i915_switch_context() following the wait and
+        * before the dispatch.
+        */
+       ppgtt = waiter_req->ctx->ppgtt;
+       if (ppgtt && waiter_req->engine->id != RCS)
+               ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
        return 0;
 }
 
@@ -1455,7 +1557,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
               struct intel_engine_cs *signaller,
               u32 seqno)
 {
-       struct intel_engine_cs *waiter = waiter_req->ring;
+       struct intel_engine_cs *waiter = waiter_req->engine;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
@@ -1475,7 +1577,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
                return ret;
 
        /* If seqno wrap happened, omit the wait with no-ops */
-       if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+       if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
                intel_ring_emit(waiter, dw1 | wait_mbox);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
@@ -1491,205 +1593,97 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
        return 0;
 }
 
-#define PIPE_CONTROL_FLUSH(ring__, addr__)                                     \
-do {                                                                   \
-       intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
-                PIPE_CONTROL_DEPTH_STALL);                             \
-       intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
-       intel_ring_emit(ring__, 0);                                                     \
-       intel_ring_emit(ring__, 0);                                                     \
-} while (0)
-
-static int
-pc_render_add_request(struct drm_i915_gem_request *req)
+static void
+gen5_seqno_barrier(struct intel_engine_cs *ring)
 {
-       struct intel_engine_cs *ring = req->ring;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-       int ret;
-
-       /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
-        * incoherent with writes to memory, i.e. completely fubar,
-        * so we need to use PIPE_NOTIFY instead.
+       /* MI_STORE commands are internally buffered by the GPU and not flushed
+        * either by MI_FLUSH or SyncFlush or any other combination of
+        * MI commands.
         *
-        * However, we also need to workaround the qword write
-        * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
-        * memory before requesting an interrupt.
+        * "Only the submission of the store operation is guaranteed.
+        * The write result will be complete (coherent) some time later
+        * (this is practically a finite period but there is no guaranteed
+        * latency)."
+        *
+        * Empirically, we observe that we need a delay of at least 75us to
+        * be sure that the seqno write is visible to the CPU.
         */
-       ret = intel_ring_begin(req, 32);
-       if (ret)
-               return ret;
-
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-       intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-       intel_ring_emit(ring, 0);
-       PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
-       PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(ring, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(ring, scratch_addr);
-
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
-                       PIPE_CONTROL_NOTIFY);
-       intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-       intel_ring_emit(ring, 0);
-       __intel_ring_advance(ring);
-
-       return 0;
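+       /* 125us (up to 250us, allowing timer coalescing) comfortably covers
+        * the 75us empirical minimum noted above. */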
+       usleep_range(125, 250);
 }
 
-static u32
-gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void
+gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
+
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
-        * ACTHD) before reading the status page. */
-       if (!lazy_coherency) {
-               struct drm_i915_private *dev_priv = ring->dev->dev_private;
-               POSTING_READ(RING_ACTHD(ring->mmio_base));
-       }
-
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
-}
-
-static u32
-ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
-{
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+        * ACTHD) before reading the status page.
+        *
+        * Note that this effectively stalls the read for the time it takes to
+        * do a memory transaction, which more or less ensures that the write
+        * from the GPU has sufficient time to invalidate the CPU cacheline.
+        * Alternatively we could delay the interrupt from the CS ring to give
+        * the write time to land, but that would incur a delay after every
+        * batch i.e. much more frequent than a delay when waiting for the
+        * interrupt (with the same net latency).
+        *
+        * Also note that to prevent whole machine hangs on gen7, we have to
+        * take the spinlock to guard against concurrent cacheline access.
+        */
+       spin_lock_irq(&dev_priv->uncore.lock);
+       POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 static void
-ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
-{
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+gen5_irq_enable(struct intel_engine_cs *engine)
 {
-       return ring->scratch.cpu_page[0];
+       gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+gen5_irq_disable(struct intel_engine_cs *engine)
 {
-       ring->scratch.cpu_page[0] = seqno;
-}
-
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *ring)
-{
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0)
-               gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *ring)
+i9xx_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0)
-               gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-}
-
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *ring)
-{
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~ring->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
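+       /* Pre-gen6 hardware has no forcewake, so the raw _FW posting read
+        * is sufficient here. */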
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *ring)
+i9xx_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               dev_priv->irq_mask |= ring->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
 }
 
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *ring)
+static void
+i8xx_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~ring->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
+       POSTING_READ16(RING_IMR(engine->mmio_base));
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *ring)
+i8xx_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               dev_priv->irq_mask |= ring->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
 }
 
 static int
@@ -1697,160 +1691,107 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
               u32     invalidate_domains,
               u32     flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_FLUSH);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, MI_FLUSH);
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
        return 0;
 }
 
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 4);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-       intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
-       intel_ring_emit(ring, MI_USER_INTERRUPT);
-       __intel_ring_advance(ring);
+       intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(engine,
+                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(engine, req->seqno);
+       intel_ring_emit(engine, MI_USER_INTERRUPT);
+       __intel_ring_advance(engine);
 
        return 0;
 }
 
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *ring)
+static void
+gen6_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS)
-                       I915_WRITE_IMR(ring,
-                                      ~(ring->irq_enable_mask |
-                                        GT_PARITY_ERROR(dev)));
-               else
-                       I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       return true;
+       if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+               I915_WRITE_IMR(engine,
+                              ~(engine->irq_enable_mask |
+                                GT_PARITY_ERROR(dev_priv)));
+       else
+               I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+       gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *ring)
+gen6_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS)
-                       I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
-               else
-                       I915_WRITE_IMR(ring, ~0);
-               gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
+       else
+               I915_WRITE_IMR(engine, ~0);
+       gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *ring)
+static void
+hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       return true;
+       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+       gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *ring)
+hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               I915_WRITE_IMR(ring, ~0);
-               gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~0);
+       gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *ring)
+static void
+gen8_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS) {
-                       I915_WRITE_IMR(ring,
-                                      ~(ring->irq_enable_mask |
-                                        GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-               } else {
-                       I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               }
-               POSTING_READ(RING_IMR(ring->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       return true;
+       if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+               I915_WRITE_IMR(engine,
+                              ~(engine->irq_enable_mask |
+                                GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+       else
+               I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *ring)
+gen8_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && ring->id == RCS) {
-                       I915_WRITE_IMR(ring,
-                                      ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-               } else {
-                       I915_WRITE_IMR(ring, ~0);
-               }
-               POSTING_READ(RING_IMR(ring->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+               I915_WRITE_IMR(engine,
+                              ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+       else
+               I915_WRITE_IMR(engine, ~0);
 }
 
 static int
@@ -1858,20 +1799,20 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 length,
                         unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring,
+       intel_ring_emit(engine,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
                        (dispatch_flags & I915_DISPATCH_SECURE ?
                         0 : MI_BATCH_NON_SECURE_I965));
-       intel_ring_emit(ring, offset);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, offset);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -1885,8 +1826,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 len,
                         unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
-       u32 cs_offset = ring->scratch.gtt_offset;
+       struct intel_engine_cs *engine = req->engine;
+       u32 cs_offset = engine->scratch.gtt_offset;
        int ret;
 
        ret = intel_ring_begin(req, 6);
@@ -1894,13 +1835,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                return ret;
 
        /* Evict the invalid PTE TLBs */
-       intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-       intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-       intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-       intel_ring_emit(ring, cs_offset);
-       intel_ring_emit(ring, 0xdeadbeef);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+       intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+       intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+       intel_ring_emit(engine, cs_offset);
+       intel_ring_emit(engine, 0xdeadbeef);
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
 
        if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
                if (len > I830_BATCH_LIMIT)
@@ -1914,16 +1855,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                 * stable batch scratch bo area (so that the CS never
                 * stumbles over its tlb invalidation bug) ...
                 */
-               intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-               intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-               intel_ring_emit(ring, cs_offset);
-               intel_ring_emit(ring, 4096);
-               intel_ring_emit(ring, offset);
-
-               intel_ring_emit(ring, MI_FLUSH);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
+               intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+               intel_ring_emit(engine,
+                               BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+               intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+               intel_ring_emit(engine, cs_offset);
+               intel_ring_emit(engine, 4096);
+               intel_ring_emit(engine, offset);
+
+               intel_ring_emit(engine, MI_FLUSH);
+               intel_ring_emit(engine, MI_NOOP);
+               intel_ring_advance(engine);
 
                /* ... and execute it. */
                offset = cs_offset;
@@ -1933,10 +1875,10 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-       intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-                                       0 : MI_BATCH_NON_SECURE));
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+       intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                         0 : MI_BATCH_NON_SECURE));
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -1946,58 +1888,58 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 len,
                         unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-       intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-                                       0 : MI_BATCH_NON_SECURE));
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+       intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                         0 : MI_BATCH_NON_SECURE));
+       intel_ring_advance(engine);
 
        return 0;
 }
 
-static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (!dev_priv->status_page_dmah)
                return;
 
-       drm_pci_free(ring->dev, dev_priv->status_page_dmah);
-       ring->status_page.page_addr = NULL;
+       drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
+       engine->status_page.page_addr = NULL;
 }
 
-static void cleanup_status_page(struct intel_engine_cs *ring)
+static void cleanup_status_page(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_object *obj;
 
-       obj = ring->status_page.obj;
+       obj = engine->status_page.obj;
        if (obj == NULL)
                return;
 
        kunmap(sg_page(obj->pages->sgl));
        i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
-       ring->status_page.obj = NULL;
+       engine->status_page.obj = NULL;
 }
 
-static int init_status_page(struct intel_engine_cs *ring)
+static int init_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_object *obj = ring->status_page.obj;
+       struct drm_i915_gem_object *obj = engine->status_page.obj;
 
        if (obj == NULL) {
                unsigned flags;
                int ret;
 
-               obj = i915_gem_alloc_object(ring->dev, 4096);
-               if (obj == NULL) {
+               obj = i915_gem_object_create(engine->i915->dev, 4096);
+               if (IS_ERR(obj)) {
                        DRM_ERROR("Failed to allocate status page\n");
-                       return -ENOMEM;
+                       return PTR_ERR(obj);
                }
 
                ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2005,7 +1947,7 @@ static int init_status_page(struct intel_engine_cs *ring)
                        goto err_unref;
 
                flags = 0;
-               if (!HAS_LLC(ring->dev))
+               if (!HAS_LLC(engine->i915))
                        /* On g33, we cannot place HWS above 256MiB, so
                         * restrict its pinning to the low mappable arena.
                         * Though this restriction is not documented for
@@ -2024,75 +1966,58 @@ err_unref:
                        return ret;
                }
 
-               ring->status_page.obj = obj;
+               engine->status_page.obj = obj;
        }
 
-       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
-       ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
-       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+       engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+       memset(engine->status_page.page_addr, 0, PAGE_SIZE);
 
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
-                       ring->name, ring->status_page.gfx_addr);
+                       engine->name, engine->status_page.gfx_addr);
 
        return 0;
 }
 
-static int init_phys_status_page(struct intel_engine_cs *ring)
+static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
-                       drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+                       drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
                if (!dev_priv->status_page_dmah)
                        return -ENOMEM;
        }
 
-       ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+       engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+       memset(engine->status_page.page_addr, 0, PAGE_SIZE);
 
        return 0;
 }
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
+       GEM_BUG_ON(ringbuf->vma == NULL);
+       GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
        if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-               vunmap(ringbuf->virtual_start);
+               i915_gem_object_unpin_map(ringbuf->obj);
        else
-               iounmap(ringbuf->virtual_start);
+               i915_vma_unpin_iomap(ringbuf->vma);
        ringbuf->virtual_start = NULL;
-       ringbuf->vma = NULL;
-       i915_gem_object_ggtt_unpin(ringbuf->obj);
-}
 
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
-       struct sg_page_iter sg_iter;
-       struct page **pages;
-       void *addr;
-       int i;
-
-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-       if (pages == NULL)
-               return NULL;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_page_iter_page(&sg_iter);
-
-       addr = vmap(pages, i, 0, PAGE_KERNEL);
-       drm_free_large(pages);
-
-       return addr;
+       i915_gem_object_ggtt_unpin(ringbuf->obj);
+       ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
                                     struct intel_ringbuffer *ringbuf)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = ringbuf->obj;
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
        unsigned flags = PIN_OFFSET_BIAS | 4096;
+       void *addr;
        int ret;
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
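                /* With an LLC we can map the ring through the CPU cache and
                 * access it coherently alongside the GPU. */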
@@ -2101,15 +2026,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                        return ret;
 
                ret = i915_gem_object_set_to_cpu_domain(obj, true);
-               if (ret) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return ret;
-               }
+               if (ret)
+                       goto err_unpin;
 
-               ringbuf->virtual_start = vmap_obj(obj);
-               if (ringbuf->virtual_start == NULL) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return -ENOMEM;
+               addr = i915_gem_object_pin_map(obj);
+               if (IS_ERR(addr)) {
+                       ret = PTR_ERR(addr);
+                       goto err_unpin;
                }
        } else {
                ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
@@ -2118,25 +2041,26 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                        return ret;
 
                ret = i915_gem_object_set_to_gtt_domain(obj, true);
-               if (ret) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return ret;
-               }
+               if (ret)
+                       goto err_unpin;
 
                /* Access through the GTT requires the device to be awake. */
                assert_rpm_wakelock_held(dev_priv);
 
-               ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
-                                                   i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-               if (ringbuf->virtual_start == NULL) {
-                       i915_gem_object_ggtt_unpin(obj);
-                       return -EINVAL;
+               addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+               if (IS_ERR(addr)) {
+                       ret = PTR_ERR(addr);
+                       goto err_unpin;
                }
        }
 
+       ringbuf->virtual_start = addr;
        ringbuf->vma = i915_gem_obj_to_ggtt(obj);
-
        return 0;
+
+err_unpin:
+       i915_gem_object_ggtt_unpin(obj);
+       return ret;
 }
 
 static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
@@ -2154,9 +2078,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
        if (!HAS_LLC(dev))
                obj = i915_gem_object_create_stolen(dev, ringbuf->size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, ringbuf->size);
-       if (obj == NULL)
-               return -ENOMEM;
+               obj = i915_gem_object_create(dev, ringbuf->size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
@@ -2179,7 +2103,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
                return ERR_PTR(-ENOMEM);
        }
 
-       ring->ring = engine;
+       ring->engine = engine;
        list_add(&ring->link, &engine->buffers);
 
        ring->size = size;
@@ -2188,13 +2112,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
         * of the buffer.
         */
        ring->effective_size = size;
-       if (IS_I830(engine->dev) || IS_845G(engine->dev))
+       if (IS_I830(engine->i915) || IS_845G(engine->i915))
                ring->effective_size -= 2 * CACHELINE_BYTES;
 
        ring->last_retired_head = -1;
        intel_ring_update_space(ring);
 
-       ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+       ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
                                 engine->name, ret);
@@ -2214,238 +2138,257 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
        kfree(ring);
 }
 
+static int intel_ring_context_pin(struct i915_gem_context *ctx,
+                                 struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+       int ret;
+
+       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+
+       if (ce->pin_count++)
+               return 0;
+
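+       /* First user of this context on this engine: pin the context image
+        * into the GGTT so the hardware can address it while in use. */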
+       if (ce->state) {
+               ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+               if (ret)
+                       goto error;
+       }
+
+       /* The kernel context is only used as a placeholder for flushing the
+        * active context. It is never used for submitting user rendering and
+        * as such never requires the golden render context, and so we can skip
+        * emitting it when we switch to the kernel context. This is required
+        * as during eviction we cannot allocate and pin the renderstate in
+        * order to initialise the context.
+        */
+       if (ctx == ctx->i915->kernel_context)
+               ce->initialised = true;
+
+       i915_gem_context_reference(ctx);
+       return 0;
+
+error:
+       ce->pin_count = 0;
+       return ret;
+}
+
+static void intel_ring_context_unpin(struct i915_gem_context *ctx,
+                                    struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+
+       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+
+       if (--ce->pin_count)
+               return;
+
+       if (ce->state)
+               i915_gem_object_ggtt_unpin(ce->state);
+
+       i915_gem_context_unreference(ctx);
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
-                                 struct intel_engine_cs *ring)
+                                 struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_ringbuffer *ringbuf;
        int ret;
 
-       WARN_ON(ring->buffer);
+       WARN_ON(engine->buffer);
+
+       engine->i915 = dev_priv;
+       INIT_LIST_HEAD(&engine->active_list);
+       INIT_LIST_HEAD(&engine->request_list);
+       INIT_LIST_HEAD(&engine->execlist_queue);
+       INIT_LIST_HEAD(&engine->buffers);
+       i915_gem_batch_pool_init(dev, &engine->batch_pool);
+       memset(engine->semaphore.sync_seqno, 0,
+              sizeof(engine->semaphore.sync_seqno));
 
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       INIT_LIST_HEAD(&ring->execlist_queue);
-       INIT_LIST_HEAD(&ring->buffers);
-       i915_gem_batch_pool_init(dev, &ring->batch_pool);
-       memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+       ret = intel_engine_init_breadcrumbs(engine);
+       if (ret)
+               goto error;
 
-       init_waitqueue_head(&ring->irq_queue);
+       /* We may need to do things with the shrinker which
+        * require us to immediately switch back to the default
+        * context. This can cause a problem as pinning the
+        * default context also requires GTT space which may not
+        * be available. To avoid this we always pin the default
+        * context.
+        */
+       ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
+       if (ret)
+               goto error;
 
-       ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+       ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
                ret = PTR_ERR(ringbuf);
                goto error;
        }
-       ring->buffer = ringbuf;
+       engine->buffer = ringbuf;
 
-       if (I915_NEED_GFX_HWS(dev)) {
-               ret = init_status_page(ring);
+       if (I915_NEED_GFX_HWS(dev_priv)) {
+               ret = init_status_page(engine);
                if (ret)
                        goto error;
        } else {
-               WARN_ON(ring->id != RCS);
-               ret = init_phys_status_page(ring);
+               WARN_ON(engine->id != RCS);
+               ret = init_phys_status_page(engine);
                if (ret)
                        goto error;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+       ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
        if (ret) {
                DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
-                               ring->name, ret);
+                               engine->name, ret);
                intel_destroy_ringbuffer_obj(ringbuf);
                goto error;
        }
 
-       ret = i915_cmd_parser_init_ring(ring);
+       ret = i915_cmd_parser_init_ring(engine);
        if (ret)
                goto error;
 
        return 0;
 
 error:
-       intel_cleanup_ring_buffer(ring);
+       intel_cleanup_engine(engine);
        return ret;
 }
 
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+void intel_cleanup_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(engine))
                return;
 
-       dev_priv = to_i915(ring->dev);
+       dev_priv = engine->i915;
 
-       if (ring->buffer) {
-               intel_stop_ring_buffer(ring);
-               WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+       if (engine->buffer) {
+               intel_stop_engine(engine);
+               WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-               intel_unpin_ringbuffer_obj(ring->buffer);
-               intel_ringbuffer_free(ring->buffer);
-               ring->buffer = NULL;
+               intel_unpin_ringbuffer_obj(engine->buffer);
+               intel_ringbuffer_free(engine->buffer);
+               engine->buffer = NULL;
        }
 
-       if (ring->cleanup)
-               ring->cleanup(ring);
+       if (engine->cleanup)
+               engine->cleanup(engine);
 
-       if (I915_NEED_GFX_HWS(ring->dev)) {
-               cleanup_status_page(ring);
+       if (I915_NEED_GFX_HWS(dev_priv)) {
+               cleanup_status_page(engine);
        } else {
-               WARN_ON(ring->id != RCS);
-               cleanup_phys_status_page(ring);
-       }
-
-       i915_cmd_parser_fini_ring(ring);
-       i915_gem_batch_pool_fini(&ring->batch_pool);
-       ring->dev = NULL;
-}
-
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
-       struct intel_ringbuffer *ringbuf = ring->buffer;
-       struct drm_i915_gem_request *request;
-       unsigned space;
-       int ret;
-
-       if (intel_ring_space(ringbuf) >= n)
-               return 0;
-
-       /* The whole point of reserving space is to not wait! */
-       WARN_ON(ringbuf->reserved_in_use);
-
-       list_for_each_entry(request, &ring->request_list, list) {
-               space = __intel_ring_space(request->postfix, ringbuf->tail,
-                                          ringbuf->size);
-               if (space >= n)
-                       break;
+               WARN_ON(engine->id != RCS);
+               cleanup_phys_status_page(engine);
        }
 
-       if (WARN_ON(&request->list == &ring->request_list))
-               return -ENOSPC;
+       i915_cmd_parser_fini_ring(engine);
+       i915_gem_batch_pool_fini(&engine->batch_pool);
+       intel_engine_fini_breadcrumbs(engine);
 
-       ret = i915_wait_request(request);
-       if (ret)
-               return ret;
+       intel_ring_context_unpin(dev_priv->kernel_context, engine);
 
-       ringbuf->space = space;
-       return 0;
+       engine->i915 = NULL;
 }
 
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-       uint32_t __iomem *virt;
-       int rem = ringbuf->size - ringbuf->tail;
-
-       virt = ringbuf->virtual_start + ringbuf->tail;
-       rem /= 4;
-       while (rem--)
-               iowrite32(MI_NOOP, virt++);
-
-       ringbuf->tail = 0;
-       intel_ring_update_space(ringbuf);
-}
-
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_engine_idle(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req;
 
        /* Wait upon the last request to be completed */
-       if (list_empty(&ring->request_list))
+       if (list_empty(&engine->request_list))
                return 0;
 
-       req = list_entry(ring->request_list.prev,
-                       struct drm_i915_gem_request,
-                       list);
+       req = list_entry(engine->request_list.prev,
+                        struct drm_i915_gem_request,
+                        list);
 
        /* Make sure we do not trigger any retires */
        return __i915_wait_request(req,
-                                  atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
-                                  to_i915(ring->dev)->mm.interruptible,
+                                  req->i915->mm.interruptible,
                                   NULL, NULL);
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       request->ringbuf = request->ring->buffer;
-       return 0;
-}
+       int ret;
 
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-       /*
-        * The first call merely notes the reserve request and is common for
-        * all back ends. The subsequent localised _begin() call actually
-        * ensures that the reservation is available. Without the begin, if
-        * the request creator immediately submitted the request without
-        * adding any commands to it then there might not actually be
-        * sufficient room for the submission commands.
+       /* Flush enough space to reduce the likelihood of waiting after
+        * we start building the request - in which case we will just
+        * have to repeat work.
         */
-       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+       request->reserved_space += LEGACY_REQUEST_SIZE;
 
-       return intel_ring_begin(request, 0);
-}
+       request->ringbuf = request->engine->buffer;
 
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
-       WARN_ON(ringbuf->reserved_size);
-       WARN_ON(ringbuf->reserved_in_use);
+       ret = intel_ring_begin(request, 0);
+       if (ret)
+               return ret;
 
-       ringbuf->reserved_size = size;
+       request->reserved_space -= LEGACY_REQUEST_SIZE;
+       return 0;
 }
 
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-       WARN_ON(ringbuf->reserved_in_use);
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_engine_cs *engine = req->engine;
+       struct drm_i915_gem_request *target;
 
-       ringbuf->reserved_size   = 0;
-       ringbuf->reserved_in_use = false;
-}
+       intel_ring_update_space(ringbuf);
+       if (ringbuf->space >= bytes)
+               return 0;
 
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
-       WARN_ON(ringbuf->reserved_in_use);
+       /*
+        * Space is reserved in the ringbuffer for finalising the request,
+        * as that cannot be allowed to fail. During request finalisation,
+        * reserved_space is set to 0 to stop the overallocation and the
+        * assumption is that then we never need to wait (which has the
+        * risk of failing with EINTR).
+        *
+        * See also i915_gem_request_alloc() and i915_add_request().
+        */
+       GEM_BUG_ON(!req->reserved_space);
 
-       ringbuf->reserved_in_use = true;
-       ringbuf->reserved_tail   = ringbuf->tail;
-}
+       list_for_each_entry(target, &engine->request_list, list) {
+               unsigned space;
 
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
-       WARN_ON(!ringbuf->reserved_in_use);
-       if (ringbuf->tail > ringbuf->reserved_tail) {
-               WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
-                    "request reserved size too small: %d vs %d!\n",
-                    ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
-       } else {
                /*
-                * The ring was wrapped while the reserved space was in use.
-                * That means that some unknown amount of the ring tail was
-                * no-op filled and skipped. Thus simply adding the ring size
-                * to the tail and doing the above space check will not work.
-                * Rather than attempt to track how much tail was skipped,
-                * it is much simpler to say that also skipping the sanity
-                * check every once in a while is not a big issue.
+                * The request queue is per-engine, so can contain requests
+                * from multiple ringbuffers. Here, we must ignore any that
+                * aren't from the ringbuffer we're considering.
                 */
+               if (target->ringbuf != ringbuf)
+                       continue;
+
+               /* Would completion of this request free enough space? */
+               space = __intel_ring_space(target->postfix, ringbuf->tail,
+                                          ringbuf->size);
+               if (space >= bytes)
+                       break;
        }
 
-       ringbuf->reserved_size   = 0;
-       ringbuf->reserved_in_use = false;
+       if (WARN_ON(&target->list == &engine->request_list))
+               return -ENOSPC;
+
+       return i915_wait_request(target);
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-       struct intel_ringbuffer *ringbuf = ring->buffer;
-       int remain_usable = ringbuf->effective_size - ringbuf->tail;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
        int remain_actual = ringbuf->size - ringbuf->tail;
-       int ret, total_bytes, wait_bytes = 0;
+       int remain_usable = ringbuf->effective_size - ringbuf->tail;
+       int bytes = num_dwords * sizeof(u32);
+       int total_bytes, wait_bytes;
        bool need_wrap = false;
 
-       if (ringbuf->reserved_in_use)
-               total_bytes = bytes;
-       else
-               total_bytes = bytes + ringbuf->reserved_size;
+       total_bytes = bytes + req->reserved_space;
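+       /* We need room for the new dwords plus the space reserved for
+        * emitting the final breadcrumb without blocking. */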
 
        if (unlikely(bytes > remain_usable)) {
                /*
@@ -2454,62 +2397,50 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
                 */
                wait_bytes = remain_actual + total_bytes;
                need_wrap = true;
+       } else if (unlikely(total_bytes > remain_usable)) {
+               /*
+                * The base request will fit but the reserved space
+                * falls off the end. So we don't need an immediate wrap
+                * and only need to effectively wait for the reserved
+                * size space from the start of the ringbuffer.
+                */
+               wait_bytes = remain_actual + req->reserved_space;
        } else {
-               if (unlikely(total_bytes > remain_usable)) {
-                       /*
-                        * The base request will fit but the reserved space
-                        * falls off the end. So don't need an immediate wrap
-                        * and only need to effectively wait for the reserved
-                        * size space from the start of ringbuffer.
-                        */
-                       wait_bytes = remain_actual + ringbuf->reserved_size;
-               } else if (total_bytes > ringbuf->space) {
-                       /* No wrapping required, just waiting. */
-                       wait_bytes = total_bytes;
-               }
+               /* No wrapping required, just waiting. */
+               wait_bytes = total_bytes;
        }
 
-       if (wait_bytes) {
-               ret = ring_wait_for_space(ring, wait_bytes);
+       if (wait_bytes > ringbuf->space) {
+               int ret = wait_for_space(req, wait_bytes);
                if (unlikely(ret))
                        return ret;
 
-               if (need_wrap)
-                       __wrap_ring_buffer(ringbuf);
+               intel_ring_update_space(ringbuf);
+               if (unlikely(ringbuf->space < wait_bytes))
+                       return -EAGAIN;
        }
 
-       return 0;
-}
-
-int intel_ring_begin(struct drm_i915_gem_request *req,
-                    int num_dwords)
-{
-       struct intel_engine_cs *ring;
-       struct drm_i915_private *dev_priv;
-       int ret;
+       if (unlikely(need_wrap)) {
+               GEM_BUG_ON(remain_actual > ringbuf->space);
+               GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
 
-       WARN_ON(req == NULL);
-       ring = req->ring;
-       dev_priv = ring->dev->dev_private;
-
-       ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-                                  dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
-
-       ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
-       if (ret)
-               return ret;
+               /* Fill the tail with MI_NOOP */
+               memset(ringbuf->virtual_start + ringbuf->tail,
+                      0, remain_actual);
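+               /* (MI_NOOP is opcode 0, so zeroing the remainder is the same
+                * as filling it with no-ops.) */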
+               ringbuf->tail = 0;
+               ringbuf->space -= remain_actual;
+       }
 
-       ring->buffer->space -= num_dwords * sizeof(uint32_t);
+       ringbuf->space -= bytes;
+       GEM_BUG_ON(ringbuf->space < 0);
        return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+       struct intel_engine_cs *engine = req->engine;
+       int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
 
        if (num_dwords == 0)
@@ -2521,66 +2452,100 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
                return ret;
 
        while (num_dwords--)
-               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(engine, MI_NOOP);
 
-       intel_ring_advance(ring);
+       intel_ring_advance(engine);
 
        return 0;
 }
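
Worked example of the alignment arithmetic above (assuming CACHELINE_BYTES ==
64; the complementary pad count is computed in the part of the function elided
between the hunks):

/*
 * tail = 0x1234:
 *   0x1234 & (64 - 1) = 0x34 = 52 bytes into the cacheline,
 *   52 / sizeof(uint32_t) = 13 dwords past the boundary,
 * so 64/4 - 13 = 3 MI_NOOPs advance the tail to the next 64-byte
 * boundary.
 */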
 
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
-               I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
-               I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
-               if (HAS_VEBOX(dev))
-                       I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
+       /* Our semaphore implementation is strictly monotonic (i.e. we proceed
+        * so long as the semaphore value in the register/page is greater
+        * than the sync value), so whenever we reset the seqno, as long
+        * as we also reset the tracking semaphore value to 0 it will
+        * always be before the next request's seqno. If we don't reset
+        * the semaphore value, then when the seqno moves backwards all
+        * future waits will complete instantly (causing rendering corruption).
+        */
+       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+               I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+               I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+               if (HAS_VEBOX(dev_priv))
+                       I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
        }
+       if (dev_priv->semaphore_obj) {
+               struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
+               struct page *page = i915_gem_object_get_dirty_page(obj, 0);
+               void *semaphores = kmap(page);
+               memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+                      0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+               kunmap(page);
+       }
+       memset(engine->semaphore.sync_seqno, 0,
+              sizeof(engine->semaphore.sync_seqno));
+
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
+       engine->last_submitted_seqno = seqno;
 
-       ring->set_seqno(ring, seqno);
-       ring->hangcheck.seqno = seqno;
+       engine->hangcheck.seqno = seqno;
+
+       /* After manually advancing the seqno, fake the interrupt in case
+        * there are any waiters for that seqno.
+        */
+       rcu_read_lock();
+       intel_engine_wakeup(engine);
+       rcu_read_unlock();
 }
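
To see why the semaphore state must be zeroed, model the monotonic comparison
the comment describes (a sketch; the actual compare is performed by the
hardware against the mailbox register or semaphore page):

/* Illustrative model of the monotonic semaphore wait. */
static bool semaphore_passed(u32 signalled, u32 wait_value)
{
	return signalled >= wait_value;
}

Having signalled, say, 1000 and then rewound the seqno to 1,
semaphore_passed(1000, 2) is already true; without the memset above every
future wait would complete instantly and batches would run before their
dependencies.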
 
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
                                     u32 value)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* Every tail move must follow the sequence below */
 
        /* Disable notification that the ring is IDLE. The GT
         * will then assume that it is busy and bring it out of rc6.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
        /* Clear the context id. Here be magic! */
-       I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+       I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
 
        /* Wait for the ring not to be idle, i.e. for it to wake up. */
-       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-                     GEN6_BSD_SLEEP_INDICATOR) == 0,
-                    50))
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_BSD_SLEEP_PSMI_CONTROL,
+                                      GEN6_BSD_SLEEP_INDICATOR,
+                                      0,
+                                      50))
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
        /* Now that the ring is fully powered up, update the tail */
-       I915_WRITE_TAIL(ring, value);
-       POSTING_READ(RING_TAIL(ring->mmio_base));
+       I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
+       POSTING_READ_FW(RING_TAIL(engine->mmio_base));
 
        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                               u32 invalidate, u32 flush)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        uint32_t cmd;
        int ret;
 
@@ -2589,7 +2554,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(ring->dev)->gen >= 8)
+       if (INTEL_GEN(req->i915) >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2608,16 +2573,17 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
        if (invalidate & I915_GEM_GPU_DOMAINS)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-       intel_ring_emit(ring, cmd);
-       intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(ring->dev)->gen >= 8) {
-               intel_ring_emit(ring, 0); /* upper addr */
-               intel_ring_emit(ring, 0); /* value */
+       intel_ring_emit(engine, cmd);
+       intel_ring_emit(engine,
+                       I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+       if (INTEL_GEN(req->i915) >= 8) {
+               intel_ring_emit(engine, 0); /* upper addr */
+               intel_ring_emit(engine, 0); /* value */
        } else {
-               intel_ring_emit(ring, 0);
-               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(engine, 0);
+               intel_ring_emit(engine, MI_NOOP);
        }
-       intel_ring_advance(ring);
+       intel_ring_advance(engine);
        return 0;
 }
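
The `cmd += 1` above bumps the MI_FLUSH_DW length field: on gen8 the
post-sync address is 64 bits wide, making the command one dword longer. The
four dwords emitted above are, per generation (illustrative summary of the
two code paths):

/*
 *   gen6/7: cmd     | addr|USE_GTT | 0              | MI_NOOP (pad)
 *   gen8+:  cmd + 1 | addr|USE_GTT | 0 (upper addr) | 0 (value)
 */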
 
@@ -2626,8 +2592,8 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                              u64 offset, u32 len,
                              unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
-       bool ppgtt = USES_PPGTT(ring->dev) &&
+       struct intel_engine_cs *engine = req->engine;
+       bool ppgtt = USES_PPGTT(engine->dev) &&
                        !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
@@ -2636,13 +2602,13 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                return ret;
 
        /* FIXME(BDW): Address space and security selectors. */
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+       intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
                        (dispatch_flags & I915_DISPATCH_RS ?
                         MI_BATCH_RESOURCE_STREAMER : 0));
-       intel_ring_emit(ring, lower_32_bits(offset));
-       intel_ring_emit(ring, upper_32_bits(offset));
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, lower_32_bits(offset));
+       intel_ring_emit(engine, upper_32_bits(offset));
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
 
        return 0;
 }
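
For reference, the four dwords emitted for the gen8 batch start above (bit 8
of the header selects the PPGTT address space; illustrative summary):

/*
 *   dw0: MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) [| RS enable]
 *   dw1: lower_32_bits(offset)   -- batch address, low half
 *   dw2: upper_32_bits(offset)   -- batch address, high half
 *   dw3: MI_NOOP                 -- pad to an even length
 */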
@@ -2652,22 +2618,22 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                             u64 offset, u32 len,
                             unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring,
+       intel_ring_emit(engine,
                        MI_BATCH_BUFFER_START |
                        (dispatch_flags & I915_DISPATCH_SECURE ?
                         0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
                        (dispatch_flags & I915_DISPATCH_RS ?
                         MI_BATCH_RESOURCE_STREAMER : 0));
        /* bit0-7 is the length on GEN6+ */
-       intel_ring_emit(ring, offset);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, offset);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -2677,20 +2643,20 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                              u64 offset, u32 len,
                              unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring,
+       intel_ring_emit(engine,
                        MI_BATCH_BUFFER_START |
                        (dispatch_flags & I915_DISPATCH_SECURE ?
                         0 : MI_BATCH_NON_SECURE_I965));
        /* bit0-7 is the length on GEN6+ */
-       intel_ring_emit(ring, offset);
-       intel_ring_advance(ring);
+       intel_ring_emit(engine, offset);
+       intel_ring_advance(engine);
 
        return 0;
 }
@@ -2700,8 +2666,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
                           u32 invalidate, u32 flush)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_device *dev = ring->dev;
+       struct intel_engine_cs *engine = req->engine;
        uint32_t cmd;
        int ret;
 
@@ -2710,7 +2675,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(dev)->gen >= 8)
+       if (INTEL_GEN(req->i915) >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2728,164 +2693,223 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
         */
        if (invalidate & I915_GEM_DOMAIN_RENDER)
                cmd |= MI_INVALIDATE_TLB;
-       intel_ring_emit(ring, cmd);
-       intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(dev)->gen >= 8) {
-               intel_ring_emit(ring, 0); /* upper addr */
-               intel_ring_emit(ring, 0); /* value */
+       intel_ring_emit(engine, cmd);
+       intel_ring_emit(engine,
+                       I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+       if (INTEL_GEN(req->i915) >= 8) {
+               intel_ring_emit(engine, 0); /* upper addr */
+               intel_ring_emit(engine, 0); /* value */
        } else {
-               intel_ring_emit(ring, 0);
-               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(engine, 0);
+               intel_ring_emit(engine, MI_NOOP);
        }
-       intel_ring_advance(ring);
+       intel_ring_advance(engine);
 
        return 0;
 }
 
-int intel_init_render_ring_buffer(struct drm_device *dev)
+static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
+                                      struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        struct drm_i915_gem_object *obj;
-       int ret;
+       int ret, i;
 
-       ring->name = "render ring";
-       ring->id = RCS;
-       ring->exec_id = I915_EXEC_RENDER;
-       ring->mmio_base = RENDER_RING_BASE;
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
-               if (i915_semaphore_is_enabled(dev)) {
-                       obj = i915_gem_alloc_object(dev, 4096);
-                       if (obj == NULL) {
-                               DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+       if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+               obj = i915_gem_object_create(dev_priv->dev, 4096);
+               if (IS_ERR(obj)) {
+                       DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+                       i915.semaphores = 0;
+               } else {
+                       i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+                       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(&obj->base);
+                               DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
                                i915.semaphores = 0;
                        } else {
-                               i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-                               ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-                               if (ret != 0) {
-                                       drm_gem_object_unreference(&obj->base);
-                                       DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-                                       i915.semaphores = 0;
-                               } else
-                                       dev_priv->semaphore_obj = obj;
+                               dev_priv->semaphore_obj = obj;
                        }
                }
+       }
+
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 8) {
+               u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+
+               engine->semaphore.sync_to = gen8_ring_sync;
+               engine->semaphore.signal = gen8_xcs_signal;
+
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       u64 ring_offset;
+
+                       if (i != engine->id)
+                               ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
+                       else
+                               ring_offset = MI_SEMAPHORE_SYNC_INVALID;
 
-               ring->init_context = intel_rcs_ctx_init;
-               ring->add_request = gen6_add_request;
-               ring->flush = gen8_render_ring_flush;
-               ring->irq_get = gen8_ring_get_irq;
-               ring->irq_put = gen8_ring_put_irq;
-               ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               ring->get_seqno = gen6_ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev)) {
-                       WARN_ON(!dev_priv->semaphore_obj);
-                       ring->semaphore.sync_to = gen8_ring_sync;
-                       ring->semaphore.signal = gen8_rcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT;
+                       engine->semaphore.signal_ggtt[i] = ring_offset;
                }
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               ring->init_context = intel_rcs_ctx_init;
-               ring->add_request = gen6_add_request;
-               ring->flush = gen7_render_ring_flush;
-               if (INTEL_INFO(dev)->gen == 6)
-                       ring->flush = gen6_render_ring_flush;
-               ring->irq_get = gen6_ring_get_irq;
-               ring->irq_put = gen6_ring_put_irq;
-               ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               ring->get_seqno = gen6_ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev)) {
-                       ring->semaphore.sync_to = gen6_ring_sync;
-                       ring->semaphore.signal = gen6_signal;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between RCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between VCS2 and RCS later.
-                        */
-                       ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-                       ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-                       ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-                       ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-                       ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-                       ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-                       ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-                       ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->semaphore.sync_to = gen6_ring_sync;
+               engine->semaphore.signal = gen6_signal;
+
+               /*
+                * The current semaphore is only applied on pre-gen8
+                * platforms, and there is no VCS2 ring on pre-gen8
+                * platforms, so the semaphore between RCS and VCS2 is
+                * initialized as INVALID. Gen8 will initialize the
+                * semaphore between VCS2 and RCS later.
+                */
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       static const struct {
+                               u32 wait_mbox;
+                               i915_reg_t mbox_reg;
+                       } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
+                               [RCS] = {
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+                               },
+                               [VCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+                               },
+                               [BCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+                               },
+                               [VECS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+                               },
+                       };
+                       u32 wait_mbox;
+                       i915_reg_t mbox_reg;
+
+                       if (i == engine->id || i == VCS2) {
+                               wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+                               mbox_reg = GEN6_NOSYNC;
+                       } else {
+                               wait_mbox = sem_data[engine->id][i].wait_mbox;
+                               mbox_reg = sem_data[engine->id][i].mbox_reg;
+                       }
+
+                       engine->semaphore.mbox.wait[i] = wait_mbox;
+                       engine->semaphore.mbox.signal[i] = mbox_reg;
                }
-       } else if (IS_GEN5(dev)) {
-               ring->add_request = pc_render_add_request;
-               ring->flush = gen4_render_ring_flush;
-               ring->get_seqno = pc_render_get_seqno;
-               ring->set_seqno = pc_render_set_seqno;
-               ring->irq_get = gen5_ring_get_irq;
-               ring->irq_put = gen5_ring_put_irq;
-               ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
-                                       GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
+       }
+}
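
The gen8 branch assumes one slot per (signaller, waiter) pair in the shared
semaphore page, which is also why intel_ring_init_seqno() above only needs to
clear the engine's own row of I915_NUM_ENGINES slots. A sketch of the
addressing this implies (the GEN8_SEMAPHORE_OFFSET macro is the authoritative
definition):

/* Offset of the slot engine 'from' writes when signalling 'to'. */
static u64 semaphore_slot(u64 base, int from, int to,
			  int num_engines, u64 slot_size)
{
	return base + (u64)(from * num_engines + to) * slot_size;
}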
+
+static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
+                               struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(dev_priv) >= 8) {
+               engine->irq_enable = gen8_irq_enable;
+               engine->irq_disable = gen8_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->irq_enable = gen6_irq_enable;
+               engine->irq_disable = gen6_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 5) {
+               engine->irq_enable = gen5_irq_enable;
+               engine->irq_disable = gen5_irq_disable;
+               engine->irq_seqno_barrier = gen5_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               engine->irq_enable = i9xx_irq_enable;
+               engine->irq_disable = i9xx_irq_disable;
        } else {
-               ring->add_request = i9xx_add_request;
-               if (INTEL_INFO(dev)->gen < 4)
-                       ring->flush = gen2_render_ring_flush;
-               else
-                       ring->flush = gen4_render_ring_flush;
-               ring->get_seqno = ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
-               if (IS_GEN2(dev)) {
-                       ring->irq_get = i8xx_ring_get_irq;
-                       ring->irq_put = i8xx_ring_put_irq;
-               } else {
-                       ring->irq_get = i9xx_ring_get_irq;
-                       ring->irq_put = i9xx_ring_put_irq;
-               }
-               ring->irq_enable_mask = I915_USER_INTERRUPT;
+               engine->irq_enable = i8xx_irq_enable;
+               engine->irq_disable = i8xx_irq_disable;
        }
-       ring->write_tail = ring_write_tail;
-
-       if (IS_HASWELL(dev))
-               ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
-       else if (IS_GEN8(dev))
-               ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-       else if (INTEL_INFO(dev)->gen >= 6)
-               ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-       else if (INTEL_INFO(dev)->gen >= 4)
-               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-       else if (IS_I830(dev) || IS_845G(dev))
-               ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+}
+
+static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
+                                     struct intel_engine_cs *engine)
+{
+       engine->init_hw = init_ring_common;
+       engine->write_tail = ring_write_tail;
+
+       engine->add_request = i9xx_add_request;
+       if (INTEL_GEN(dev_priv) >= 6)
+               engine->add_request = gen6_add_request;
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 6)
+               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 4)
+               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
        else
-               ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-       ring->init_hw = init_render_ring;
-       ring->cleanup = render_ring_cleanup;
-
-       /* Workaround batchbuffer to combat CS tlb bug. */
-       if (HAS_BROKEN_CS_TLB(dev)) {
-               obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
-               if (obj == NULL) {
-                       DRM_ERROR("Failed to allocate batch bo\n");
-                       return -ENOMEM;
-               }
+               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
 
-               ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
-               if (ret != 0) {
-                       drm_gem_object_unreference(&obj->base);
-                       DRM_ERROR("Failed to ping batch bo\n");
-                       return ret;
-               }
+       intel_ring_init_irq(dev_priv, engine);
+       intel_ring_init_semaphores(dev_priv, engine);
+}
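
With the common vfuncs factored out, every engine constructor below reduces
to the same shape (sketch):

/*
 *	engine->name = ...; engine->id = ...; engine->hw_id = ...;
 *	engine->mmio_base = ...;
 *	intel_ring_default_vfuncs(dev_priv, engine);
 *	... per-engine overrides: flush, irq_enable_mask, etc. ...
 *	return intel_init_ring_buffer(dev, engine);
 */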
 
-               ring->scratch.obj = obj;
-               ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+       int ret;
+
+       engine->name = "render ring";
+       engine->id = RCS;
+       engine->exec_id = I915_EXEC_RENDER;
+       engine->hw_id = 0;
+       engine->mmio_base = RENDER_RING_BASE;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
+       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+
+       if (INTEL_GEN(dev_priv) >= 8) {
+               engine->init_context = intel_rcs_ctx_init;
+               engine->add_request = gen8_render_add_request;
+               engine->flush = gen8_render_ring_flush;
+               if (i915_semaphore_is_enabled(dev_priv))
+                       engine->semaphore.signal = gen8_rcs_signal;
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->init_context = intel_rcs_ctx_init;
+               engine->flush = gen7_render_ring_flush;
+               if (IS_GEN6(dev_priv))
+                       engine->flush = gen6_render_ring_flush;
+       } else if (IS_GEN5(dev_priv)) {
+               engine->flush = gen4_render_ring_flush;
+       } else {
+               if (INTEL_GEN(dev_priv) < 4)
+                       engine->flush = gen2_render_ring_flush;
+               else
+                       engine->flush = gen4_render_ring_flush;
+               engine->irq_enable_mask = I915_USER_INTERRUPT;
        }
 
-       ret = intel_init_ring_buffer(dev, ring);
+       if (IS_HASWELL(dev_priv))
+               engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+
+       engine->init_hw = init_render_ring;
+       engine->cleanup = render_ring_cleanup;
+
+       ret = intel_init_ring_buffer(dev, engine);
        if (ret)
                return ret;
 
-       if (INTEL_INFO(dev)->gen >= 5) {
-               ret = intel_init_pipe_control(ring);
+       if (INTEL_GEN(dev_priv) >= 6) {
+               ret = intel_init_pipe_control(engine, 4096);
+               if (ret)
+                       return ret;
+       } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
+               ret = intel_init_pipe_control(engine, I830_WA_SIZE);
                if (ret)
                        return ret;
        }
@@ -2896,75 +2920,36 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+       struct intel_engine_cs *engine = &dev_priv->engine[VCS];
+
+       engine->name = "bsd ring";
+       engine->id = VCS;
+       engine->exec_id = I915_EXEC_BSD;
+       engine->hw_id = 1;
 
-       ring->name = "bsd ring";
-       ring->id = VCS;
-       ring->exec_id = I915_EXEC_BSD;
+       intel_ring_default_vfuncs(dev_priv, engine);
 
-       ring->write_tail = ring_write_tail;
-       if (INTEL_INFO(dev)->gen >= 6) {
-               ring->mmio_base = GEN6_BSD_RING_BASE;
+       if (INTEL_GEN(dev_priv) >= 6) {
+               engine->mmio_base = GEN6_BSD_RING_BASE;
                /* gen6 bsd needs a special wa for tail updates */
-               if (IS_GEN6(dev))
-                       ring->write_tail = gen6_bsd_ring_write_tail;
-               ring->flush = gen6_bsd_ring_flush;
-               ring->add_request = gen6_add_request;
-               ring->get_seqno = gen6_ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
-               if (INTEL_INFO(dev)->gen >= 8) {
-                       ring->irq_enable_mask =
+               if (IS_GEN6(dev_priv))
+                       engine->write_tail = gen6_bsd_ring_write_tail;
+               engine->flush = gen6_bsd_ring_flush;
+               if (INTEL_GEN(dev_priv) >= 8)
+                       engine->irq_enable_mask =
                                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-                       ring->irq_get = gen8_ring_get_irq;
-                       ring->irq_put = gen8_ring_put_irq;
-                       ring->dispatch_execbuffer =
-                               gen8_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev)) {
-                               ring->semaphore.sync_to = gen8_ring_sync;
-                               ring->semaphore.signal = gen8_xcs_signal;
-                               GEN8_RING_SEMAPHORE_INIT;
-                       }
-               } else {
-                       ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-                       ring->irq_get = gen6_ring_get_irq;
-                       ring->irq_put = gen6_ring_put_irq;
-                       ring->dispatch_execbuffer =
-                               gen6_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev)) {
-                               ring->semaphore.sync_to = gen6_ring_sync;
-                               ring->semaphore.signal = gen6_signal;
-                               ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-                               ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-                               ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-                               ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-                               ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                               ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-                               ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-                               ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-                               ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-                               ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-                       }
-               }
+               else
+                       engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
        } else {
-               ring->mmio_base = BSD_RING_BASE;
-               ring->flush = bsd_ring_flush;
-               ring->add_request = i9xx_add_request;
-               ring->get_seqno = ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
-               if (IS_GEN5(dev)) {
-                       ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-                       ring->irq_get = gen5_ring_get_irq;
-                       ring->irq_put = gen5_ring_put_irq;
-               } else {
-                       ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-                       ring->irq_get = i9xx_ring_get_irq;
-                       ring->irq_put = i9xx_ring_put_irq;
-               }
-               ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+               engine->mmio_base = BSD_RING_BASE;
+               engine->flush = bsd_ring_flush;
+               if (IS_GEN5(dev_priv))
+                       engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+               else
+                       engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
        }
-       ring->init_hw = init_ring_common;
 
-       return intel_init_ring_buffer(dev, ring);
+       return intel_init_ring_buffer(dev, engine);
 }
 
 /**
@@ -2973,196 +2958,125 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
-
-       ring->name = "bsd2 ring";
-       ring->id = VCS2;
-       ring->exec_id = I915_EXEC_BSD;
-
-       ring->write_tail = ring_write_tail;
-       ring->mmio_base = GEN8_BSD2_RING_BASE;
-       ring->flush = gen6_bsd_ring_flush;
-       ring->add_request = gen6_add_request;
-       ring->get_seqno = gen6_ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
-       ring->irq_enable_mask =
+       struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
+
+       engine->name = "bsd2 ring";
+       engine->id = VCS2;
+       engine->exec_id = I915_EXEC_BSD;
+       engine->hw_id = 4;
+       engine->mmio_base = GEN8_BSD2_RING_BASE;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
+       engine->flush = gen6_bsd_ring_flush;
+       engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-       ring->irq_get = gen8_ring_get_irq;
-       ring->irq_put = gen8_ring_put_irq;
-       ring->dispatch_execbuffer =
-                       gen8_ring_dispatch_execbuffer;
-       if (i915_semaphore_is_enabled(dev)) {
-               ring->semaphore.sync_to = gen8_ring_sync;
-               ring->semaphore.signal = gen8_xcs_signal;
-               GEN8_RING_SEMAPHORE_INIT;
-       }
-       ring->init_hw = init_ring_common;
 
-       return intel_init_ring_buffer(dev, ring);
+       return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[BCS];
-
-       ring->name = "blitter ring";
-       ring->id = BCS;
-       ring->exec_id = I915_EXEC_BLT;
-
-       ring->mmio_base = BLT_RING_BASE;
-       ring->write_tail = ring_write_tail;
-       ring->flush = gen6_ring_flush;
-       ring->add_request = gen6_add_request;
-       ring->get_seqno = gen6_ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
-       if (INTEL_INFO(dev)->gen >= 8) {
-               ring->irq_enable_mask =
+       struct intel_engine_cs *engine = &dev_priv->engine[BCS];
+
+       engine->name = "blitter ring";
+       engine->id = BCS;
+       engine->exec_id = I915_EXEC_BLT;
+       engine->hw_id = 2;
+       engine->mmio_base = BLT_RING_BASE;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
+       engine->flush = gen6_ring_flush;
+       if (INTEL_GEN(dev_priv) >= 8)
+               engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-               ring->irq_get = gen8_ring_get_irq;
-               ring->irq_put = gen8_ring_put_irq;
-               ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       ring->semaphore.sync_to = gen8_ring_sync;
-                       ring->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT;
-               }
-       } else {
-               ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-               ring->irq_get = gen6_ring_get_irq;
-               ring->irq_put = gen6_ring_put_irq;
-               ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       ring->semaphore.signal = gen6_signal;
-                       ring->semaphore.sync_to = gen6_ring_sync;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between BCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between BCS and VCS2 later.
-                        */
-                       ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-                       ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-                       ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-                       ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-                       ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-                       ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-                       ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-                       ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
-       }
-       ring->init_hw = init_ring_common;
+       else
+               engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
-       return intel_init_ring_buffer(dev, ring);
+       return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+       struct intel_engine_cs *engine = &dev_priv->engine[VECS];
+
+       engine->name = "video enhancement ring";
+       engine->id = VECS;
+       engine->exec_id = I915_EXEC_VEBOX;
+       engine->hw_id = 3;
+       engine->mmio_base = VEBOX_RING_BASE;
 
-       ring->name = "video enhancement ring";
-       ring->id = VECS;
-       ring->exec_id = I915_EXEC_VEBOX;
+       intel_ring_default_vfuncs(dev_priv, engine);
 
-       ring->mmio_base = VEBOX_RING_BASE;
-       ring->write_tail = ring_write_tail;
-       ring->flush = gen6_ring_flush;
-       ring->add_request = gen6_add_request;
-       ring->get_seqno = gen6_ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
+       engine->flush = gen6_ring_flush;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
-               ring->irq_enable_mask =
+       if (INTEL_GEN(dev_priv) >= 8) {
+               engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-               ring->irq_get = gen8_ring_get_irq;
-               ring->irq_put = gen8_ring_put_irq;
-               ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       ring->semaphore.sync_to = gen8_ring_sync;
-                       ring->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT;
-               }
        } else {
-               ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-               ring->irq_get = hsw_vebox_get_irq;
-               ring->irq_put = hsw_vebox_put_irq;
-               ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       ring->semaphore.sync_to = gen6_ring_sync;
-                       ring->semaphore.signal = gen6_signal;
-                       ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-                       ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-                       ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-                       ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-                       ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-                       ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-                       ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-                       ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-                       ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
+               engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+               engine->irq_enable = hsw_vebox_irq_enable;
+               engine->irq_disable = hsw_vebox_irq_disable;
        }
-       ring->init_hw = init_ring_common;
 
-       return intel_init_ring_buffer(dev, ring);
+       return intel_init_ring_buffer(dev, engine);
 }
 
 int
 intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
-       if (!ring->gpu_caches_dirty)
+       if (!engine->gpu_caches_dirty)
                return 0;
 
-       ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+       ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
        trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
 
 int
 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        uint32_t flush_domains;
        int ret;
 
        flush_domains = 0;
-       if (ring->gpu_caches_dirty)
+       if (engine->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
        trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
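
Both cache helpers implement a small dirty-flag protocol around
engine->flush() (sketch of the intended usage; the flag itself is raised by
the GEM execbuffer paths after the GPU writes):

/*
 *   after the GPU writes:  engine->gpu_caches_dirty = true;
 *   flush_all_caches:      emit a flush only if dirty, then clear it;
 *   invalidate_all_caches: always invalidate, flushing too if dirty.
 */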
 
 void
-intel_stop_ring_buffer(struct intel_engine_cs *ring)
+intel_stop_engine(struct intel_engine_cs *engine)
 {
        int ret;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(engine))
                return;
 
-       ret = intel_ring_idle(ring);
-       if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+       ret = intel_engine_idle(engine);
+       if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         ring->name, ret);
+                         engine->name, ret);
 
-       stop_ring(ring);
+       stop_ring(engine);
 }