drm/i915: Remove stop-rings debugfs interface
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 245386e..3fafc35 100644
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
 int __intel_ring_space(int head, int tail, int size)
 {
        int space = head - tail;
@@ -53,24 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
                                            ringbuf->tail, ringbuf->size);
 }
 
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
-{
-       intel_ring_update_space(ringbuf);
-       return ringbuf->space;
-}
-
-bool intel_engine_stopped(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
-       return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
-}
-
 static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
-       if (intel_engine_stopped(engine))
-               return;
        engine->write_tail(engine, ringbuf->tail);
 }
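
For reference on the hunk above: __intel_ring_space() reports how many bytes can still be emitted between the consumer (head) and producer (tail) offsets of the ring, and __intel_ring_advance() wraps the tail with a power-of-two mask. A minimal userspace sketch (not the driver code) of that arithmetic, assuming a power-of-two ring size and a small reserve standing in for I915_RING_FREE_SPACE; the constant names and values here are illustrative:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE    4096   /* power of two, as required by the tail masking */
#define RING_RESERVE 64     /* stand-in for the driver's reserved tail room */

/* Free bytes between head and tail of a wrapping ring, minus the reserve. */
static int ring_space(int head, int tail)
{
	int space = head - tail;

	if (space <= 0)
		space += RING_SIZE;   /* the free region wraps around the end */
	return space - RING_RESERVE;
}

int main(void)
{
	int tail = 4000;

	assert(ring_space(128, tail) == 4096 - (4000 - 128) - 64);

	/* Advancing past the end wraps the tail, as in __intel_ring_advance(). */
	tail = (tail + 128) & (RING_SIZE - 1);
	assert(tail == 32);

	printf("space after wrap: %d\n", ring_space(128, tail));
	return 0;
}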
 
@@ -107,7 +98,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      flush_domains)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
        u32 cmd;
        int ret;
 
@@ -146,7 +136,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                cmd |= MI_EXE_FLUSH;
 
        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-           (IS_G4X(dev) || IS_GEN5(dev)))
+           (IS_G4X(req->i915) || IS_GEN5(req->i915)))
                cmd |= MI_INVALIDATE_ISP;
 
        ret = intel_ring_begin(req, 2);
@@ -432,19 +422,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 static void ring_write_tail(struct intel_engine_cs *engine,
                            u32 value)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        I915_WRITE_TAIL(engine, value);
 }
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;
 
-       if (INTEL_INFO(engine->dev)->gen >= 8)
+       if (INTEL_GEN(dev_priv) >= 8)
                acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
                                         RING_ACTHD_UDW(engine->mmio_base));
-       else if (INTEL_INFO(engine->dev)->gen >= 4)
+       else if (INTEL_GEN(dev_priv) >= 4)
                acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);
@@ -454,25 +444,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u32 addr;
 
        addr = dev_priv->status_page_dmah->busaddr;
-       if (INTEL_INFO(engine->dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
 }
 
 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        i915_reg_t mmio;
 
        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
-       if (IS_GEN7(dev)) {
+       if (IS_GEN7(dev_priv)) {
                switch (engine->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
@@ -492,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
-       } else if (IS_GEN6(engine->dev)) {
+       } else if (IS_GEN6(dev_priv)) {
                mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
                /* XXX: gen8 returns to sanity */
@@ -509,7 +498,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
         * arises: do we still need this and if so how should we go about
         * invalidating the TLB?
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+       if (IS_GEN(dev_priv, 6, 7)) {
                i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
                /* ring should be idle before issuing a sync flush*/
@@ -518,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
                                              INSTPM_SYNC_FLUSH));
-               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv,
+                                           reg, INSTPM_SYNC_FLUSH, 0,
+                                           1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
                                  engine->name);
        }
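
The conversion above replaces an open-coded wait_for() loop with intel_wait_for_register(), which polls a register until the bits selected by a mask read back as the expected value or a timeout expires, returning 0 on success and an error on timeout. A self-contained sketch of that polling pattern, with a simulated register standing in for the driver's MMIO accessors (names and the simulated behaviour are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SYNC_FLUSH_BIT 0x8   /* illustrative busy bit, like INSTPM_SYNC_FLUSH */

/* Simulated hardware register: the busy bit clears after a few reads. */
static uint32_t read_reg(void)
{
	static int reads;
	static uint32_t reg = SYNC_FLUSH_BIT;

	if (++reads > 3)
		reg &= ~SYNC_FLUSH_BIT;
	return reg;
}

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until (reg & mask) == value or timeout_ms elapses; 0 on success. */
static int wait_for_register(uint32_t mask, uint32_t value,
			     unsigned int timeout_ms)
{
	const uint64_t deadline = now_ms() + timeout_ms;

	do {
		if ((read_reg() & mask) == value)
			return 0;
	} while (now_ms() < deadline);

	/* Final check in case we were scheduled out just before the deadline. */
	return (read_reg() & mask) == value ? 0 : -1;
}

int main(void)
{
	if (wait_for_register(SYNC_FLUSH_BIT, 0, 1000))
		fprintf(stderr, "wait for SyncFlush timed out\n");
	else
		printf("SyncFlush completed\n");
	return 0;
}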
@@ -527,11 +517,15 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 
 static bool stop_ring(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       if (!IS_GEN2(engine->dev)) {
+       if (!IS_GEN2(dev_priv)) {
                I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-               if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+               if (intel_wait_for_register(dev_priv,
+                                           RING_MI_MODE(engine->mmio_base),
+                                           MODE_IDLE,
+                                           MODE_IDLE,
+                                           1000)) {
                        DRM_ERROR("%s : timed out trying to stop ring\n",
                                  engine->name);
                        /* Sometimes we observe that the idle flag is not
@@ -547,7 +541,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
        I915_WRITE_HEAD(engine, 0);
        engine->write_tail(engine, 0);
 
-       if (!IS_GEN2(engine->dev)) {
+       if (!IS_GEN2(dev_priv)) {
                (void)I915_READ_CTL(engine);
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
        }
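
stop_ring() relies on the masked-register convention used throughout the driver: for registers such as RING_MI_MODE, the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates, so a single write can set or clear STOP_RING without a read-modify-write of the other bits. A small sketch of that encoding against a simulated register; the macro bodies mirror the driver's _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE, and the bit position is shown only for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Masked registers: bits 31:16 are the write-enable mask for bits 15:0. */
#define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)  ((a) << 16)

#define STOP_RING (1u << 8)    /* example bit within MI_MODE */

/* What the hardware does with a write to a masked register. */
static uint32_t masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t mi_mode = 0x0040;   /* pretend an unrelated bit is already set */

	mi_mode = masked_write(mi_mode, MASKED_BIT_ENABLE(STOP_RING));
	assert(mi_mode == (0x0040 | STOP_RING));   /* only STOP_RING changed */

	mi_mode = masked_write(mi_mode, MASKED_BIT_DISABLE(STOP_RING));
	assert(mi_mode == 0x0040);                 /* cleared, other bits intact */

	printf("masked writes ok\n");
	return 0;
}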
@@ -562,8 +556,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 
 static int init_ring_common(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct intel_ringbuffer *ringbuf = engine->buffer;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        int ret = 0;
@@ -593,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
                }
        }
 
-       if (I915_NEED_GFX_HWS(dev))
+       if (I915_NEED_GFX_HWS(dev_priv))
                intel_ring_setup_status_page(engine);
        else
                ring_setup_phys_status_page(engine);
@@ -647,59 +640,42 @@ out:
        return ret;
 }
 
-void
-intel_fini_pipe_control(struct intel_engine_cs *engine)
+void intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-
        if (engine->scratch.obj == NULL)
                return;
 
-       if (INTEL_INFO(dev)->gen >= 5) {
-               kunmap(sg_page(engine->scratch.obj->pages->sgl));
-               i915_gem_object_ggtt_unpin(engine->scratch.obj);
-       }
-
+       i915_gem_object_ggtt_unpin(engine->scratch.obj);
        drm_gem_object_unreference(&engine->scratch.obj->base);
        engine->scratch.obj = NULL;
 }
 
-int
-intel_init_pipe_control(struct intel_engine_cs *engine)
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
 {
+       struct drm_i915_gem_object *obj;
        int ret;
 
        WARN_ON(engine->scratch.obj);
 
-       engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
-       if (engine->scratch.obj == NULL) {
-               DRM_ERROR("Failed to allocate seqno page\n");
-               ret = -ENOMEM;
+       obj = i915_gem_object_create_stolen(engine->i915->dev, size);
+       if (!obj)
+               obj = i915_gem_object_create(engine->i915->dev, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               ret = PTR_ERR(obj);
                goto err;
        }
 
-       ret = i915_gem_object_set_cache_level(engine->scratch.obj,
-                                             I915_CACHE_LLC);
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
        if (ret)
                goto err_unref;
 
-       ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
-       if (ret)
-               goto err_unref;
-
-       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
-       engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
-       if (engine->scratch.cpu_page == NULL) {
-               ret = -ENOMEM;
-               goto err_unpin;
-       }
-
+       engine->scratch.obj = obj;
+       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                         engine->name, engine->scratch.gtt_offset);
        return 0;
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(engine->scratch.obj);
 err_unref:
        drm_gem_object_unreference(&engine->scratch.obj->base);
 err:
@@ -708,11 +684,9 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-       int ret, i;
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_workarounds *w = &dev_priv->workarounds;
+       struct i915_workarounds *w = &req->i915->workarounds;
+       int ret, i;
 
        if (w->count == 0)
                return 0;
@@ -801,7 +775,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
                                 i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct i915_workarounds *wa = &dev_priv->workarounds;
        const uint32_t index = wa->hw_whitelist_count[engine->id];
 
@@ -817,8 +791,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 
 static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
@@ -869,9 +842,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
 
 static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen8_init_workarounds(engine);
        if (ret)
@@ -891,16 +863,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-                         (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+                         (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
        return 0;
 }
 
 static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen8_init_workarounds(engine);
        if (ret)
@@ -917,38 +888,39 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
 
 static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
-       /* WaEnableLbsSlaRetryTimerDecrement:skl */
+       /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
-       /* WaDisableKillLogic:bxt,skl */
+       /* WaDisableKillLogic:bxt,skl,kbl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);
 
-       /* WaClearFlowControlGpgpuContextSave:skl,bxt */
-       /* WaDisablePartialInstShootdown:skl,bxt */
+       /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+       /* WaDisablePartialInstShootdown:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
-       /* Syncing dependencies between camera and graphics:skl,bxt */
+       /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
        /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                                  GEN9_DG_MIRROR_FIX_ENABLE);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
                /*
@@ -958,52 +930,78 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
                 */
        }
 
-       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                          GEN9_ENABLE_YV12_BUGFIX |
                          GEN9_ENABLE_GPGPU_PREEMPTION);
 
-       /* Wa4x4STCOptimizationDisable:skl,bxt */
-       /* WaDisablePartialResolveInVc:skl,bxt */
+       /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+       /* WaDisablePartialResolveInVc:skl,bxt,kbl */
        WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                                         GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
 
-       /* WaCcsTlbPrefetchDisable:skl,bxt */
+       /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);
 
        /* WaDisableMaskBasedCammingInRCC:skl,bxt */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                  PIXEL_MASK_CAMMING_DISABLE);
 
-       /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
-       tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
-           IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
-               tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
-       WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+       /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+                         HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+       /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+        * both tied to WaForceContextSaveRestoreNonCoherent
+        * in some hsds for skl. We keep the tie for all gen9. The
+        * documentation is a bit hazy and so we want to get common behaviour,
+        * even though there is no clear evidence we would need both on kbl/bxt.
+        * This area has been source of system hangs so we play it safe
+        * and mimic the skl regardless of what bspec says.
+        *
+        * Use Force Non-Coherent whenever executing a 3D context. This
+        * is a workaround for a possible hang in the unlikely event
+        * a TLB invalidation occurs during a PSD flush.
+        */
+
+       /* WaForceEnableNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_NON_COHERENT);
 
-       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-       if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+       /* WaDisableHDCInvalidation:skl,bxt,kbl */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+                  BDW_DISABLE_HDC_INVALIDATION);
+
+       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+       if (IS_SKYLAKE(dev_priv) ||
+           IS_KABYLAKE(dev_priv) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
-       /* WaDisableSTUnitPowerOptimization:skl,bxt */
+       /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
-       /* WaOCLCoherentLineFlush:skl,bxt */
+       /* WaOCLCoherentLineFlush:skl,bxt,kbl */
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+       /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+       ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+       if (ret)
+               return ret;
+
+       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
        ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;
 
-       /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+       /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
        if (ret)
                return ret;
@@ -1013,8 +1011,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 
 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;
 
@@ -1055,9 +1052,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 
 static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen9_init_workarounds(engine);
        if (ret)
@@ -1068,12 +1064,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
         * until D0 which is the default case so this is equivalent to
         * !WaDisablePerCtxtPreemptionGranularityControl:skl
         */
-       if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
                I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
        }
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1082,50 +1078,37 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
         * involving this register should also be added to WA batch as required.
         */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);
 
        /* WaEnableGapsTsvCreditFix:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
                I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                           GEN9_GAPS_TSV_CREDIT_DISABLE));
        }
 
        /* WaDisablePowerCompilerClockGating:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       /* This is tied to WaForceContextSaveRestoreNonCoherent */
-       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
-               /*
-                *Use Force Non-Coherent whenever executing a 3D context. This
-                * is a workaround for a possible hang in the unlikely event
-                * a TLB invalidation occurs during a PSD flush.
-                */
-               /* WaForceEnableNonCoherent:skl */
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FORCE_NON_COHERENT);
-
-               /* WaDisableHDCInvalidation:skl */
-               I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                          BDW_DISABLE_HDC_INVALIDATION);
-       }
-
        /* WaBarrierPerformanceFixDisable:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE |
                                  HDC_BARRIER_PERFORMANCE_DISABLE);
 
        /* WaDisableSbeCacheDispatchPortSharing:skl */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+       /* WaDisableGafsUnitClkGating:skl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
        /* WaDisableLSQCROPERFforOCL:skl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
@@ -1136,9 +1119,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen9_init_workarounds(engine);
        if (ret)
@@ -1146,11 +1128,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
        /* WaStoreMultiplePTEenable:bxt */
        /* This is a requirement according to Hardware specification */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
        /* WaSetClckGatingDisableMedia:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
        }
@@ -1159,8 +1141,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          STALL_DOP_GATING_DISABLE);
 
+       /* WaDisablePooledEuLoadBalancingFix:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+               WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+                                 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+       }
+
        /* WaDisableSbeCacheDispatchPortSharing:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1170,7 +1158,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
        /* WaDisableLSQCROPERFforOCL:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
                if (ret)
                        return ret;
@@ -1180,44 +1168,107 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
                        return ret;
        }
 
+       /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+                                          L3_HIGH_PRIO_CREDITS(2));
+
+       /* WaInsertDummyPushConstPs:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaEnableGapsTsvCreditFix:kbl */
+       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+       /* WaDisableDynamicCreditSharing:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT(GAMT_CHKN_BIT_REG,
+                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+       /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE);
+
+       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+        * involving this register should also be added to WA batch as required.
+        */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+               /* WaDisableLSQCROPERFforOCL:kbl */
+               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+                          GEN8_LQSC_RO_PERF_DIS);
+
+       /* WaInsertDummyPushConstPs:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       /* WaDisableGafsUnitClkGating:kbl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableSbeCacheDispatchPortSharing:kbl */
+       WA_SET_BIT_MASKED(
+               GEN7_HALF_SLICE_CHICKEN1,
+               GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+       /* WaDisableLSQCROPERFforOCL:kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
 int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        WARN_ON(engine->id != RCS);
 
        dev_priv->workarounds.count = 0;
        dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-       if (IS_BROADWELL(dev))
+       if (IS_BROADWELL(dev_priv))
                return bdw_init_workarounds(engine);
 
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(dev_priv))
                return chv_init_workarounds(engine);
 
-       if (IS_SKYLAKE(dev))
+       if (IS_SKYLAKE(dev_priv))
                return skl_init_workarounds(engine);
 
-       if (IS_BROXTON(dev))
+       if (IS_BROXTON(dev_priv))
                return bxt_init_workarounds(engine);
 
+       if (IS_KABYLAKE(dev_priv))
+               return kbl_init_workarounds(engine);
+
        return 0;
 }
 
 static int init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret = init_ring_common(engine);
        if (ret)
                return ret;
 
        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-       if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+       if (IS_GEN(dev_priv, 4, 6))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
        /* We need to disable the AsyncFlip performance optimisations in order
@@ -1226,22 +1277,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
        /* WaEnableFlushTlbInvalidationMode:snb */
-       if (INTEL_INFO(dev)->gen == 6)
+       if (IS_GEN6(dev_priv))
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
        /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-       if (IS_GEN7(dev))
+       if (IS_GEN7(dev_priv))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev_priv)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
@@ -1251,19 +1302,17 @@ static int init_render_ring(struct intel_engine_cs *engine)
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }
 
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       if (HAS_L3_DPF(dev))
-               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 
        return init_workarounds_ring(engine);
 }
 
 static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (dev_priv->semaphore_obj) {
                i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1279,13 +1328,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1294,22 +1342,20 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
-                                          PIPE_CONTROL_FLUSH_ENABLE);
+                                          PIPE_CONTROL_CS_STALL);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-                                          MI_SEMAPHORE_TARGET(waiter->id));
+                                          MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
        }
 
@@ -1321,13 +1367,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1336,20 +1381,18 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-                                          MI_SEMAPHORE_TARGET(waiter->id));
+                                          MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
        }
 
@@ -1360,14 +1403,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                       unsigned int num_dwords)
 {
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *useless;
        enum intel_engine_id id;
        int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1379,11 +1421,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
-                       u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, seqno);
+                       intel_ring_emit(signaller, signaller_req->seqno);
                }
        }
 
@@ -1419,17 +1459,45 @@ gen6_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(engine, req->seqno);
+       intel_ring_emit(engine, MI_USER_INTERRUPT);
+       __intel_ring_advance(engine);
+
+       return 0;
+}
+
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+       struct intel_engine_cs *engine = req->engine;
+       int ret;
+
+       if (engine->semaphore.signal)
+               ret = engine->semaphore.signal(req, 8);
+       else
+               ret = intel_ring_begin(req, 8);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                PIPE_CONTROL_CS_STALL |
+                                PIPE_CONTROL_QW_WRITE));
+       intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+       intel_ring_emit(engine, 0);
        intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       /* We're thrashing one dword of HWS. */
+       intel_ring_emit(engine, 0);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
+       intel_ring_emit(engine, MI_NOOP);
        __intel_ring_advance(engine);
 
        return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
                                              u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        return dev_priv->last_seqno < seqno;
 }
 
@@ -1447,7 +1515,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
               u32 seqno)
 {
        struct intel_engine_cs *waiter = waiter_req->engine;
-       struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+       struct drm_i915_private *dev_priv = waiter_req->i915;
+       u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+       struct i915_hw_ppgtt *ppgtt;
        int ret;
 
        ret = intel_ring_begin(waiter_req, 4);
@@ -1456,14 +1526,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 
        intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
                                MI_SEMAPHORE_GLOBAL_GTT |
-                               MI_SEMAPHORE_POLL |
                                MI_SEMAPHORE_SAD_GTE_SDD);
        intel_ring_emit(waiter, seqno);
-       intel_ring_emit(waiter,
-                       lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
-       intel_ring_emit(waiter,
-                       upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+       intel_ring_emit(waiter, lower_32_bits(offset));
+       intel_ring_emit(waiter, upper_32_bits(offset));
        intel_ring_advance(waiter);
+
+       /* When the !RCS engines idle waiting upon a semaphore, they lose their
+        * pagetables and we must reload them before executing the batch.
+        * We do this on the i915_switch_context() following the wait and
+        * before the dispatch.
+        */
+       ppgtt = waiter_req->ctx->ppgtt;
+       if (ppgtt && waiter_req->engine->id != RCS)
+               ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
        return 0;
 }
 
@@ -1492,7 +1568,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
                return ret;
 
        /* If seqno wrap happened, omit the wait with no-ops */
-       if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+       if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
                intel_ring_emit(waiter, dw1 | wait_mbox);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
@@ -1508,71 +1584,29 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
        return 0;
 }
 
-#define PIPE_CONTROL_FLUSH(ring__, addr__)                                     \
-do {                                                                   \
-       intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
-                PIPE_CONTROL_DEPTH_STALL);                             \
-       intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
-       intel_ring_emit(ring__, 0);                                                     \
-       intel_ring_emit(ring__, 0);                                                     \
-} while (0)
-
-static int
-pc_render_add_request(struct drm_i915_gem_request *req)
+static void
+gen5_seqno_barrier(struct intel_engine_cs *ring)
 {
-       struct intel_engine_cs *engine = req->engine;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-       int ret;
-
-       /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
-        * incoherent with writes to memory, i.e. completely fubar,
-        * so we need to use PIPE_NOTIFY instead.
+       /* MI_STORE are internally buffered by the GPU and not flushed
+        * either by MI_FLUSH or SyncFlush or any other combination of
+        * MI commands.
         *
-        * However, we also need to workaround the qword write
-        * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
-        * memory before requesting an interrupt.
+        * "Only the submission of the store operation is guaranteed.
+        * The write result will be complete (coherent) some time later
+        * (this is practically a finite period but there is no guaranteed
+        * latency)."
+        *
+        * Empirically, we observe that we need a delay of at least 75us to
+        * be sure that the seqno write is visible by the CPU.
         */
-       ret = intel_ring_begin(req, 32);
-       if (ret)
-               return ret;
-
-       intel_ring_emit(engine,
-                       GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
-       intel_ring_emit(engine, 0);
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-
-       intel_ring_emit(engine,
-                       GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
-                       PIPE_CONTROL_NOTIFY);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
-       intel_ring_emit(engine, 0);
-       __intel_ring_advance(engine);
-
-       return 0;
+       usleep_range(125, 250);
 }
 
 static void
 gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
+
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page.
@@ -1584,138 +1618,63 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
         * the write time to land, but that would incur a delay after every
         * batch i.e. much more frequent than a delay when waiting for the
         * interrupt (with the same net latency).
+        *
+        * Also note that to prevent whole machine hangs on gen7, we have to
+        * take the spinlock to guard against concurrent cacheline access.
         */
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       spin_lock_irq(&dev_priv->uncore.lock);
        POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
-}
-
-static u32
-ring_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
 static void
-ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
+gen5_irq_enable(struct intel_engine_cs *engine)
 {
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *engine)
-{
-       return engine->scratch.cpu_page[0];
+       gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       engine->scratch.cpu_page[0] = seqno;
-}
-
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
+gen5_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0)
-               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0)
-               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i9xx_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~engine->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               dev_priv->irq_mask |= engine->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-}
-
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
-{
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~engine->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
+       POSTING_READ16(RING_IMR(engine->mmio_base));
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               dev_priv->irq_mask |= engine->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
 }
 
 static int
@@ -1749,135 +1708,68 @@ i9xx_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
        return 0;
 }
 
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS)
-                       I915_WRITE_IMR(engine,
-                                      ~(engine->irq_enable_mask |
-                                        GT_PARITY_ERROR(dev)));
-               else
-                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask |
+                        engine->irq_keep_mask));
+       gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS)
-                       I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
-               else
-                       I915_WRITE_IMR(engine, ~0);
-               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+       gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       return true;
+       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+       gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               I915_WRITE_IMR(engine, ~0);
-               gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~0);
+       gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS) {
-                       I915_WRITE_IMR(engine,
-                                      ~(engine->irq_enable_mask |
-                                        GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-               } else {
-                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               }
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask |
+                        engine->irq_keep_mask));
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS) {
-                       I915_WRITE_IMR(engine,
-                                      ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-               } else {
-                       I915_WRITE_IMR(engine, ~0);
-               }
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
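
With the irq refcount and spinlock removed here (presumably handled by the caller after this series), the per-gen hooks reduce to plain IMR writes: enable unmasks the user-interrupt bits on top of irq_keep_mask, disable drops back to only the keep bits. A standalone sketch of that masking arithmetic, in plain userspace C with made-up bit values rather than the driver's register definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mask values, stand-ins for GT_RENDER_USER_INTERRUPT etc. */
#define USER_INTERRUPT      (1u << 0)
#define L3_PARITY_INTERRUPT (1u << 5)

struct fake_engine {
	uint32_t irq_enable_mask;	/* bits toggled per waiter */
	uint32_t irq_keep_mask;		/* bits that must stay unmasked */
	uint32_t imr;			/* modelled RING_IMR: 1 = masked */
};

static void irq_enable(struct fake_engine *e)
{
	e->imr = ~(e->irq_enable_mask | e->irq_keep_mask);
}

static void irq_disable(struct fake_engine *e)
{
	e->imr = ~e->irq_keep_mask;
}

int main(void)
{
	struct fake_engine rcs = {
		.irq_enable_mask = USER_INTERRUPT,
		.irq_keep_mask = L3_PARITY_INTERRUPT,
		.imr = ~0u,
	};

	irq_enable(&rcs);
	printf("enabled:  IMR=%08x\n", rcs.imr);	/* both bits clear */
	irq_disable(&rcs);
	printf("disabled: IMR=%08x\n", rcs.imr);	/* only parity bit clear */
	return 0;
}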
 
 static int
@@ -1991,12 +1883,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (!dev_priv->status_page_dmah)
                return;
 
-       drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+       drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
        engine->status_page.page_addr = NULL;
 }
 
@@ -2022,10 +1914,10 @@ static int init_status_page(struct intel_engine_cs *engine)
                unsigned flags;
                int ret;
 
-               obj = i915_gem_alloc_object(engine->dev, 4096);
-               if (obj == NULL) {
+               obj = i915_gem_object_create(engine->i915->dev, 4096);
+               if (IS_ERR(obj)) {
                        DRM_ERROR("Failed to allocate status page\n");
-                       return -ENOMEM;
+                       return PTR_ERR(obj);
                }
 
                ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2033,7 +1925,7 @@ static int init_status_page(struct intel_engine_cs *engine)
                        goto err_unref;
 
                flags = 0;
-               if (!HAS_LLC(engine->dev))
+               if (!HAS_LLC(engine->i915))
                        /* On g33, we cannot place HWS above 256MiB, so
                         * restrict its pinning to the low mappable arena.
                         * Though this restriction is not documented for
@@ -2067,11 +1959,11 @@ err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
-                       drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+                       drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
                if (!dev_priv->status_page_dmah)
                        return -ENOMEM;
        }
@@ -2084,20 +1976,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
+       GEM_BUG_ON(ringbuf->vma == NULL);
+       GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
        if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
                i915_gem_object_unpin_map(ringbuf->obj);
        else
-               iounmap(ringbuf->virtual_start);
+               i915_vma_unpin_iomap(ringbuf->vma);
        ringbuf->virtual_start = NULL;
-       ringbuf->vma = NULL;
+
        i915_gem_object_ggtt_unpin(ringbuf->obj);
+       ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
                                     struct intel_ringbuffer *ringbuf)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
        unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2131,10 +2025,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                /* Access through the GTT requires the device to be awake. */
                assert_rpm_wakelock_held(dev_priv);
 
-               addr = ioremap_wc(ggtt->mappable_base +
-                                 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-               if (addr == NULL) {
-                       ret = -ENOMEM;
+               addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+               if (IS_ERR(addr)) {
+                       ret = PTR_ERR(addr);
                        goto err_unpin;
                }
        }
@@ -2163,9 +2056,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
        if (!HAS_LLC(dev))
                obj = i915_gem_object_create_stolen(dev, ringbuf->size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, ringbuf->size);
-       if (obj == NULL)
-               return -ENOMEM;
+               obj = i915_gem_object_create(dev, ringbuf->size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
@@ -2197,13 +2090,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
         * of the buffer.
         */
        ring->effective_size = size;
-       if (IS_I830(engine->dev) || IS_845G(engine->dev))
+       if (IS_I830(engine->i915) || IS_845G(engine->i915))
                ring->effective_size -= 2 * CACHELINE_BYTES;
 
        ring->last_retired_head = -1;
        intel_ring_update_space(ring);
 
-       ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+       ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
                                 engine->name, ret);
@@ -2223,15 +2116,67 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
        kfree(ring);
 }
 
+static int intel_ring_context_pin(struct i915_gem_context *ctx,
+                                 struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+       int ret;
+
+       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+
+       if (ce->pin_count++)
+               return 0;
+
+       if (ce->state) {
+               ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+               if (ret)
+                       goto error;
+       }
+
+       /* The kernel context is only used as a placeholder for flushing the
+        * active context. It is never used for submitting user rendering and
+        * as such never requires the golden render context, and so we can skip
+        * emitting it when we switch to the kernel context. This is required
+        * as during eviction we cannot allocate and pin the renderstate in
+        * order to initialise the context.
+        */
+       if (ctx == ctx->i915->kernel_context)
+               ce->initialised = true;
+
+       i915_gem_context_reference(ctx);
+       return 0;
+
+error:
+       ce->pin_count = 0;
+       return ret;
+}
+
+static void intel_ring_context_unpin(struct i915_gem_context *ctx,
+                                    struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+
+       lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+
+       if (--ce->pin_count)
+               return;
+
+       if (ce->state)
+               i915_gem_object_ggtt_unpin(ce->state);
+
+       i915_gem_context_unreference(ctx);
+}
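
intel_ring_context_pin()/unpin() follow the usual first-pin-pays, last-unpin-releases pattern under struct_mutex, with the error path resetting pin_count so a failed pin does not leak a reference. A minimal userspace model of that refcounting, with invented names and no locking:

#include <stdio.h>

struct fake_ctx_engine {
	int pin_count;
	int pinned;	/* stands in for the GGTT pin of ce->state */
};

static int ctx_pin(struct fake_ctx_engine *ce)
{
	if (ce->pin_count++)		/* already pinned: just add a reference */
		return 0;
	ce->pinned = 1;			/* first user pays for the expensive setup */
	return 0;
}

static void ctx_unpin(struct fake_ctx_engine *ce)
{
	if (--ce->pin_count)		/* still in use by someone else */
		return;
	ce->pinned = 0;			/* last user tears it down */
}

int main(void)
{
	struct fake_ctx_engine ce = {0};

	ctx_pin(&ce);
	ctx_pin(&ce);
	ctx_unpin(&ce);
	printf("pin_count=%d pinned=%d\n", ce.pin_count, ce.pinned);	/* 1 1 */
	ctx_unpin(&ce);
	printf("pin_count=%d pinned=%d\n", ce.pin_count, ce.pinned);	/* 0 0 */
	return 0;
}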
+
 static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_ringbuffer *ringbuf;
        int ret;
 
        WARN_ON(engine->buffer);
 
-       engine->dev = dev;
+       engine->i915 = dev_priv;
        INIT_LIST_HEAD(&engine->active_list);
        INIT_LIST_HEAD(&engine->request_list);
        INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2240,7 +2185,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       init_waitqueue_head(&engine->irq_queue);
+       ret = intel_engine_init_breadcrumbs(engine);
+       if (ret)
+               goto error;
+
+       /* We may need to do things with the shrinker which
+        * require us to immediately switch back to the default
+        * context. This can cause a problem as pinning the
+        * default context also requires GTT space which may not
+        * be available. To avoid this we always pin the default
+        * context.
+        */
+       ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
+       if (ret)
+               goto error;
 
        ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
@@ -2249,7 +2207,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        }
        engine->buffer = ringbuf;
 
-       if (I915_NEED_GFX_HWS(dev)) {
+       if (I915_NEED_GFX_HWS(dev_priv)) {
                ret = init_status_page(engine);
                if (ret)
                        goto error;
@@ -2260,7 +2218,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                        goto error;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+       ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
        if (ret) {
                DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
                                engine->name, ret);
@@ -2286,11 +2244,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
        if (!intel_engine_initialized(engine))
                return;
 
-       dev_priv = to_i915(engine->dev);
+       dev_priv = engine->i915;
 
        if (engine->buffer) {
                intel_stop_engine(engine);
-               WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+               WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
                intel_unpin_ringbuffer_obj(engine->buffer);
                intel_ringbuffer_free(engine->buffer);
@@ -2300,7 +2258,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
        if (engine->cleanup)
                engine->cleanup(engine);
 
-       if (I915_NEED_GFX_HWS(engine->dev)) {
+       if (I915_NEED_GFX_HWS(dev_priv)) {
                cleanup_status_page(engine);
        } else {
                WARN_ON(engine->id != RCS);
@@ -2309,52 +2267,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 
        i915_cmd_parser_fini_ring(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
-       engine->dev = NULL;
-}
-
-static int ring_wait_for_space(struct intel_engine_cs *engine, int n)
-{
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       struct drm_i915_gem_request *request;
-       unsigned space;
-       int ret;
-
-       if (intel_ring_space(ringbuf) >= n)
-               return 0;
-
-       /* The whole point of reserving space is to not wait! */
-       WARN_ON(ringbuf->reserved_in_use);
+       intel_engine_fini_breadcrumbs(engine);
 
-       list_for_each_entry(request, &engine->request_list, list) {
-               space = __intel_ring_space(request->postfix, ringbuf->tail,
-                                          ringbuf->size);
-               if (space >= n)
-                       break;
-       }
+       intel_ring_context_unpin(dev_priv->kernel_context, engine);
 
-       if (WARN_ON(&request->list == &engine->request_list))
-               return -ENOSPC;
-
-       ret = i915_wait_request(request);
-       if (ret)
-               return ret;
-
-       ringbuf->space = space;
-       return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-       uint32_t __iomem *virt;
-       int rem = ringbuf->size - ringbuf->tail;
-
-       virt = ringbuf->virtual_start + ringbuf->tail;
-       rem /= 4;
-       while (rem--)
-               iowrite32(MI_NOOP, virt++);
-
-       ringbuf->tail = 0;
-       intel_ring_update_space(ringbuf);
+       engine->i915 = NULL;
 }
 
 int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2377,84 +2294,79 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       request->ringbuf = request->engine->buffer;
-       return 0;
-}
+       int ret;
 
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-       /*
-        * The first call merely notes the reserve request and is common for
-        * all back ends. The subsequent localised _begin() call actually
-        * ensures that the reservation is available. Without the begin, if
-        * the request creator immediately submitted the request without
-        * adding any commands to it then there might not actually be
-        * sufficient room for the submission commands.
+       /* Flush enough space to reduce the likelihood of waiting after
+        * we start building the request - in which case we will just
+        * have to repeat work.
         */
-       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+       request->reserved_space += LEGACY_REQUEST_SIZE;
 
-       return intel_ring_begin(request, 0);
-}
+       request->ringbuf = request->engine->buffer;
 
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
-       WARN_ON(ringbuf->reserved_size);
-       WARN_ON(ringbuf->reserved_in_use);
+       ret = intel_ring_begin(request, 0);
+       if (ret)
+               return ret;
 
-       ringbuf->reserved_size = size;
+       request->reserved_space -= LEGACY_REQUEST_SIZE;
+       return 0;
 }
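
The request constructor now over-reserves LEGACY_REQUEST_SIZE, lets intel_ring_begin(req, 0) make that much room available up front, and then drops the bump again, so later emits into this request are unlikely to have to wait. A toy model of that bookkeeping (standalone C, invented sizes and helpers, not the driver's ring accounting):

#include <stdio.h>

#define LEGACY_REQUEST_SIZE 200	/* rough per-request overhead, as above */

struct fake_req {
	int reserved_space;	/* bytes begin() must keep free on top of a write */
	int ring_space;		/* modelled free bytes in the ring */
};

/* Stand-in for intel_ring_begin(): fail if the ring cannot cover the write
 * plus the reservation; the real caller would wait for requests to retire. */
static int ring_begin(struct fake_req *req, int dwords)
{
	int total = dwords * 4 + req->reserved_space;

	return total > req->ring_space ? -1 : 0;
}

static int alloc_request_extras(struct fake_req *req)
{
	int ret;

	req->reserved_space += LEGACY_REQUEST_SIZE;	/* flush space up front */
	ret = ring_begin(req, 0);
	if (ret)
		return ret;
	req->reserved_space -= LEGACY_REQUEST_SIZE;	/* keep only the base reserve */
	return 0;
}

int main(void)
{
	struct fake_req req = { .reserved_space = 32, .ring_space = 4096 };

	printf("extras: %d, reserved left: %d\n",
	       alloc_request_extras(&req), req.reserved_space);
	return 0;
}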
 
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-       WARN_ON(ringbuf->reserved_in_use);
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_engine_cs *engine = req->engine;
+       struct drm_i915_gem_request *target;
 
-       ringbuf->reserved_size   = 0;
-       ringbuf->reserved_in_use = false;
-}
+       intel_ring_update_space(ringbuf);
+       if (ringbuf->space >= bytes)
+               return 0;
 
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
-       WARN_ON(ringbuf->reserved_in_use);
+       /*
+        * Space is reserved in the ringbuffer for finalising the request,
+        * as that cannot be allowed to fail. During request finalisation,
+        * reserved_space is set to 0 to stop the overallocation and the
+        * assumption is that then we never need to wait (which has the
+        * risk of failing with EINTR).
+        *
+        * See also i915_gem_request_alloc() and i915_add_request().
+        */
+       GEM_BUG_ON(!req->reserved_space);
 
-       ringbuf->reserved_in_use = true;
-       ringbuf->reserved_tail   = ringbuf->tail;
-}
+       list_for_each_entry(target, &engine->request_list, list) {
+               unsigned space;
 
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
-       WARN_ON(!ringbuf->reserved_in_use);
-       if (ringbuf->tail > ringbuf->reserved_tail) {
-               WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
-                    "request reserved size too small: %d vs %d!\n",
-                    ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
-       } else {
                /*
-                * The ring was wrapped while the reserved space was in use.
-                * That means that some unknown amount of the ring tail was
-                * no-op filled and skipped. Thus simply adding the ring size
-                * to the tail and doing the above space check will not work.
-                * Rather than attempt to track how much tail was skipped,
-                * it is much simpler to say that also skipping the sanity
-                * check every once in a while is not a big issue.
+                * The request queue is per-engine, so can contain requests
+                * from multiple ringbuffers. Here, we must ignore any that
+                * aren't from the ringbuffer we're considering.
                 */
+               if (target->ringbuf != ringbuf)
+                       continue;
+
+               /* Would completion of this request free enough space? */
+               space = __intel_ring_space(target->postfix, ringbuf->tail,
+                                          ringbuf->size);
+               if (space >= bytes)
+                       break;
        }
 
-       ringbuf->reserved_size   = 0;
-       ringbuf->reserved_in_use = false;
+       if (WARN_ON(&target->list == &engine->request_list))
+               return -ENOSPC;
+
+       return i915_wait_request(target);
 }
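
wait_for_space() walks the engine's request list in submission order and picks the oldest request whose retirement would free enough of this ringbuffer, skipping requests that live in other ringbuffers on the same engine. A standalone model of that scan, using a simplified space helper and an invented request layout:

#include <stdio.h>

/* Simplified circular-buffer free space if the GPU head were at 'head';
 * a stand-in for the driver's space helper, not a copy of it. */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;
	return space;
}

struct fake_request {
	int postfix;	/* ring offset just past this request's commands */
	int same_ring;	/* does it occupy the ringbuffer we care about? */
};

/* Return the index of the first request whose completion frees >= bytes,
 * or -1 if even draining everything would not be enough. */
static int pick_request_to_wait_on(const struct fake_request *reqs, int n,
				   int tail, int size, int bytes)
{
	for (int i = 0; i < n; i++) {
		if (!reqs[i].same_ring)
			continue;
		if (ring_space(reqs[i].postfix, tail, size) >= bytes)
			return i;
	}
	return -1;
}

int main(void)
{
	const struct fake_request reqs[] = {
		{ .postfix = 0x100, .same_ring = 1 },
		{ .postfix = 0x800, .same_ring = 0 },	/* other ringbuffer: ignored */
		{ .postfix = 0xc00, .same_ring = 1 },
	};

	printf("wait on request %d\n",
	       pick_request_to_wait_on(reqs, 3, 0xe00, 0x1000, 0x400));	/* 2 */
	return 0;
}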
 
-static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       int remain_usable = ringbuf->effective_size - ringbuf->tail;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
        int remain_actual = ringbuf->size - ringbuf->tail;
-       int ret, total_bytes, wait_bytes = 0;
+       int remain_usable = ringbuf->effective_size - ringbuf->tail;
+       int bytes = num_dwords * sizeof(u32);
+       int total_bytes, wait_bytes;
        bool need_wrap = false;
 
-       if (ringbuf->reserved_in_use)
-               total_bytes = bytes;
-       else
-               total_bytes = bytes + ringbuf->reserved_size;
+       total_bytes = bytes + req->reserved_space;
 
        if (unlikely(bytes > remain_usable)) {
                /*
@@ -2463,44 +2375,42 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
                 */
                wait_bytes = remain_actual + total_bytes;
                need_wrap = true;
+       } else if (unlikely(total_bytes > remain_usable)) {
+               /*
+                * The base request will fit but the reserved space
+                * falls off the end. So we don't need an immediate wrap
+                * and only need to effectively wait for the reserved
+                * size space from the start of ringbuffer.
+                */
+               wait_bytes = remain_actual + req->reserved_space;
        } else {
-               if (unlikely(total_bytes > remain_usable)) {
-                       /*
-                        * The base request will fit but the reserved space
-                        * falls off the end. So don't need an immediate wrap
-                        * and only need to effectively wait for the reserved
-                        * size space from the start of ringbuffer.
-                        */
-                       wait_bytes = remain_actual + ringbuf->reserved_size;
-               } else if (total_bytes > ringbuf->space) {
-                       /* No wrapping required, just waiting. */
-                       wait_bytes = total_bytes;
-               }
+               /* No wrapping required, just waiting. */
+               wait_bytes = total_bytes;
        }
 
-       if (wait_bytes) {
-               ret = ring_wait_for_space(engine, wait_bytes);
+       if (wait_bytes > ringbuf->space) {
+               int ret = wait_for_space(req, wait_bytes);
                if (unlikely(ret))
                        return ret;
 
-               if (need_wrap)
-                       __wrap_ring_buffer(ringbuf);
+               intel_ring_update_space(ringbuf);
+               if (unlikely(ringbuf->space < wait_bytes))
+                       return -EAGAIN;
        }
 
-       return 0;
-}
-
-int intel_ring_begin(struct drm_i915_gem_request *req,
-                    int num_dwords)
-{
-       struct intel_engine_cs *engine = req->engine;
-       int ret;
+       if (unlikely(need_wrap)) {
+               GEM_BUG_ON(remain_actual > ringbuf->space);
+               GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
 
-       ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
-       if (ret)
-               return ret;
+               /* Fill the tail with MI_NOOP */
+               memset(ringbuf->virtual_start + ringbuf->tail,
+                      0, remain_actual);
+               ringbuf->tail = 0;
+               ringbuf->space -= remain_actual;
+       }
 
-       engine->buffer->space -= num_dwords * sizeof(uint32_t);
+       ringbuf->space -= bytes;
+       GEM_BUG_ON(ringbuf->space < 0);
        return 0;
 }
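
The rewritten intel_ring_begin() distinguishes three cases before deciding how much space to wait for: the write itself spills past the effective end (wrap, so the dead bytes at the tail count too), only the reservation spills past the end (wait, but no wrap yet), or plain waiting. A standalone walk-through of that arithmetic with made-up ring numbers:

#include <stdbool.h>
#include <stdio.h>

struct ring_state {
	int size;		/* total ring size */
	int effective_size;	/* size minus the tail scratch area */
	int tail;
	int space;
	int reserved_space;
};

static void classify(const struct ring_state *r, int bytes)
{
	int remain_actual = r->size - r->tail;
	int remain_usable = r->effective_size - r->tail;
	int total_bytes = bytes + r->reserved_space;
	bool need_wrap = false;
	int wait_bytes;

	if (bytes > remain_usable) {
		/* the write itself does not fit: wrap, so the dead tail counts too */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else if (total_bytes > remain_usable) {
		/* only the reservation falls off the end */
		wait_bytes = remain_actual + r->reserved_space;
	} else {
		wait_bytes = total_bytes;
	}

	printf("bytes=%d wait_bytes=%d wrap=%d wait_needed=%d\n",
	       bytes, wait_bytes, need_wrap, wait_bytes > r->space);
}

int main(void)
{
	struct ring_state r = {
		.size = 4096, .effective_size = 4032,
		.tail = 3968, .space = 100, .reserved_space = 16,
	};

	classify(&r, 32);	/* write and reservation fit before the end */
	classify(&r, 56);	/* write fits, reservation spills past the end */
	classify(&r, 96);	/* write does not fit: wrap and wait */
	return 0;
}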
 
@@ -2529,7 +2439,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
        /* Our semaphore implementation is strictly monotonic (i.e. we proceed
         * so long as the semaphore value in the register/page is greater
@@ -2539,7 +2449,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
         * the semaphore value, then when the seqno moves backwards all
         * future waits will complete instantly (causing rendering corruption).
         */
-       if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
                if (HAS_VEBOX(dev_priv))
@@ -2556,43 +2466,58 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       engine->set_seqno(engine, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
        engine->last_submitted_seqno = seqno;
 
        engine->hangcheck.seqno = seqno;
+
+       /* After manually advancing the seqno, fake the interrupt in case
+        * there are any waiters for that seqno.
+        */
+       rcu_read_lock();
+       intel_engine_wakeup(engine);
+       rcu_read_unlock();
 }
 
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
                                     u32 value)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* Every tail move must follow the sequence below */
 
        /* Disable notification that the ring is IDLE. The GT
         * will then assume that it is busy and bring it out of rc6.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
        /* Clear the context id. Here be magic! */
-       I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+       I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
 
        /* Wait for the ring not to be idle, i.e. for it to wake up. */
-       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-                     GEN6_BSD_SLEEP_INDICATOR) == 0,
-                    50))
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_BSD_SLEEP_PSMI_CONTROL,
+                                      GEN6_BSD_SLEEP_INDICATOR,
+                                      0,
+                                      50))
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
        /* Now that the ring is fully powered up, update the tail */
-       I915_WRITE_TAIL(engine, value);
-       POSTING_READ(RING_TAIL(engine->mmio_base));
+       I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
+       POSTING_READ_FW(RING_TAIL(engine->mmio_base));
 
        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
@@ -2607,7 +2532,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(engine->dev)->gen >= 8)
+       if (INTEL_GEN(req->i915) >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2629,7 +2554,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
        intel_ring_emit(engine, cmd);
        intel_ring_emit(engine,
                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(engine->dev)->gen >= 8) {
+       if (INTEL_GEN(req->i915) >= 8) {
                intel_ring_emit(engine, 0); /* upper addr */
                intel_ring_emit(engine, 0); /* value */
        } else  {
@@ -2720,7 +2645,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                           u32 invalidate, u32 flush)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
        uint32_t cmd;
        int ret;
 
@@ -2729,7 +2653,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(dev)->gen >= 8)
+       if (INTEL_GEN(req->i915) >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2750,7 +2674,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
        intel_ring_emit(engine, cmd);
        intel_ring_emit(engine,
                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(req->i915) >= 8) {
                intel_ring_emit(engine, 0); /* upper addr */
                intel_ring_emit(engine, 0); /* value */
        } else  {
@@ -2762,152 +2686,210 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
        return 0;
 }
 
+static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
+                                      struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_object *obj;
+       int ret, i;
+
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+               obj = i915_gem_object_create(dev_priv->dev, 4096);
+               if (IS_ERR(obj)) {
+                       DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+                       i915.semaphores = 0;
+               } else {
+                       i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+                       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(&obj->base);
+                               DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+                               i915.semaphores = 0;
+                       } else {
+                               dev_priv->semaphore_obj = obj;
+                       }
+               }
+       }
+
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 8) {
+               u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+
+               engine->semaphore.sync_to = gen8_ring_sync;
+               engine->semaphore.signal = gen8_xcs_signal;
+
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       u64 ring_offset;
+
+                       if (i != engine->id)
+                               ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
+                       else
+                               ring_offset = MI_SEMAPHORE_SYNC_INVALID;
+
+                       engine->semaphore.signal_ggtt[i] = ring_offset;
+               }
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->semaphore.sync_to = gen6_ring_sync;
+               engine->semaphore.signal = gen6_signal;
+
+               /*
+                * The current semaphore is only applied on pre-gen8
+                * platform.  And there is no VCS2 ring on the pre-gen8
+                * platform. So the semaphore between RCS and VCS2 is
+                * initialized as INVALID.  Gen8 will initialize the
+                * sema between VCS2 and RCS later.
+                */
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       static const struct {
+                               u32 wait_mbox;
+                               i915_reg_t mbox_reg;
+                       } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
+                               [RCS] = {
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+                               },
+                               [VCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+                               },
+                               [BCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+                               },
+                               [VECS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+                               },
+                       };
+                       u32 wait_mbox;
+                       i915_reg_t mbox_reg;
+
+                       if (i == engine->id || i == VCS2) {
+                               wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+                               mbox_reg = GEN6_NOSYNC;
+                       } else {
+                               wait_mbox = sem_data[engine->id][i].wait_mbox;
+                               mbox_reg = sem_data[engine->id][i].mbox_reg;
+                       }
+
+                       engine->semaphore.mbox.wait[i] = wait_mbox;
+                       engine->semaphore.mbox.signal[i] = mbox_reg;
+               }
+       }
+}
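
On gen8+ each engine gets one slot per (signaller, waiter) pair inside the shared semaphore page, and signal_ggtt[] records where this engine writes for each possible waiter, with the self entry left invalid. A toy layout assuming one qword per pair; the real packing comes from GEN8_SEMAPHORE_OFFSET and may differ:

#include <stdint.h>
#include <stdio.h>

#define NUM_ENGINES 5
#define SEMAPHORE_SEQNO_SIZE 8	/* assumed: one qword per (from, to) pair */

/* Toy version of the per-pair offset into the shared semaphore page. */
static uint64_t semaphore_offset(int from, int to)
{
	return (uint64_t)(from * NUM_ENGINES + to) * SEMAPHORE_SEQNO_SIZE;
}

int main(void)
{
	uint64_t base = 0x10000;	/* pretend GGTT offset of semaphore_obj */

	/* Each engine signals a distinct slot per waiter; the self slot is unused. */
	for (int from = 0; from < NUM_ENGINES; from++)
		for (int to = 0; to < NUM_ENGINES; to++)
			if (from != to)
				printf("engine %d -> %d: %#llx\n", from, to,
				       (unsigned long long)(base + semaphore_offset(from, to)));
	return 0;
}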
+
+static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
+                               struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(dev_priv) >= 8) {
+               engine->irq_enable = gen8_irq_enable;
+               engine->irq_disable = gen8_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->irq_enable = gen6_irq_enable;
+               engine->irq_disable = gen6_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 5) {
+               engine->irq_enable = gen5_irq_enable;
+               engine->irq_disable = gen5_irq_disable;
+               engine->irq_seqno_barrier = gen5_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               engine->irq_enable = i9xx_irq_enable;
+               engine->irq_disable = i9xx_irq_disable;
+       } else {
+               engine->irq_enable = i8xx_irq_enable;
+               engine->irq_disable = i8xx_irq_disable;
+       }
+}
+
+static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
+                                     struct intel_engine_cs *engine)
+{
+       engine->init_hw = init_ring_common;
+       engine->write_tail = ring_write_tail;
+
+       engine->add_request = i9xx_add_request;
+       if (INTEL_GEN(dev_priv) >= 6)
+               engine->add_request = gen6_add_request;
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 6)
+               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 4)
+               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+       else
+               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
+       intel_ring_init_irq(dev_priv, engine);
+       intel_ring_init_semaphores(dev_priv, engine);
+}
+
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
-       struct drm_i915_gem_object *obj;
        int ret;
 
        engine->name = "render ring";
        engine->id = RCS;
        engine->exec_id = I915_EXEC_RENDER;
+       engine->hw_id = 0;
        engine->mmio_base = RENDER_RING_BASE;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
-               if (i915_semaphore_is_enabled(dev)) {
-                       obj = i915_gem_alloc_object(dev, 4096);
-                       if (obj == NULL) {
-                               DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
-                               i915.semaphores = 0;
-                       } else {
-                               i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-                               ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-                               if (ret != 0) {
-                                       drm_gem_object_unreference(&obj->base);
-                                       DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-                                       i915.semaphores = 0;
-                               } else
-                                       dev_priv->semaphore_obj = obj;
-                       }
-               }
+       intel_ring_default_vfuncs(dev_priv, engine);
 
+       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+       if (HAS_L3_DPF(dev_priv))
+               engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+       if (INTEL_GEN(dev_priv) >= 8) {
                engine->init_context = intel_rcs_ctx_init;
-               engine->add_request = gen6_add_request;
+               engine->add_request = gen8_render_add_request;
                engine->flush = gen8_render_ring_flush;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev)) {
-                       WARN_ON(!dev_priv->semaphore_obj);
-                       engine->semaphore.sync_to = gen8_ring_sync;
+               if (i915_semaphore_is_enabled(dev_priv))
                        engine->semaphore.signal = gen8_rcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                engine->init_context = intel_rcs_ctx_init;
-               engine->add_request = gen6_add_request;
                engine->flush = gen7_render_ring_flush;
-               if (INTEL_INFO(dev)->gen == 6)
+               if (IS_GEN6(dev_priv))
                        engine->flush = gen6_render_ring_flush;
-               engine->irq_get = gen6_ring_get_irq;
-               engine->irq_put = gen6_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       engine->semaphore.signal = gen6_signal;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between RCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between VCS2 and RCS later.
-                        */
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
-       } else if (IS_GEN5(dev)) {
-               engine->add_request = pc_render_add_request;
+       } else if (IS_GEN5(dev_priv)) {
                engine->flush = gen4_render_ring_flush;
-               engine->get_seqno = pc_render_get_seqno;
-               engine->set_seqno = pc_render_set_seqno;
-               engine->irq_get = gen5_ring_get_irq;
-               engine->irq_put = gen5_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
-                                       GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
        } else {
-               engine->add_request = i9xx_add_request;
-               if (INTEL_INFO(dev)->gen < 4)
+               if (INTEL_GEN(dev_priv) < 4)
                        engine->flush = gen2_render_ring_flush;
                else
                        engine->flush = gen4_render_ring_flush;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (IS_GEN2(dev)) {
-                       engine->irq_get = i8xx_ring_get_irq;
-                       engine->irq_put = i8xx_ring_put_irq;
-               } else {
-                       engine->irq_get = i9xx_ring_get_irq;
-                       engine->irq_put = i9xx_ring_put_irq;
-               }
                engine->irq_enable_mask = I915_USER_INTERRUPT;
        }
-       engine->write_tail = ring_write_tail;
 
-       if (IS_HASWELL(dev))
+       if (IS_HASWELL(dev_priv))
                engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
-       else if (IS_GEN8(dev))
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-       else if (INTEL_INFO(dev)->gen >= 6)
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-       else if (INTEL_INFO(dev)->gen >= 4)
-               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
-       else if (IS_I830(dev) || IS_845G(dev))
-               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
-       else
-               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
        engine->init_hw = init_render_ring;
        engine->cleanup = render_ring_cleanup;
 
-       /* Workaround batchbuffer to combat CS tlb bug. */
-       if (HAS_BROKEN_CS_TLB(dev)) {
-               obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
-               if (obj == NULL) {
-                       DRM_ERROR("Failed to allocate batch bo\n");
-                       return -ENOMEM;
-               }
-
-               ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
-               if (ret != 0) {
-                       drm_gem_object_unreference(&obj->base);
-                       DRM_ERROR("Failed to ping batch bo\n");
-                       return ret;
-               }
-
-               engine->scratch.obj = obj;
-               engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
-       }
-
        ret = intel_init_ring_buffer(dev, engine);
        if (ret)
                return ret;
 
-       if (INTEL_INFO(dev)->gen >= 5) {
-               ret = intel_init_pipe_control(engine);
+       if (INTEL_GEN(dev_priv) >= 6) {
+               ret = intel_init_pipe_control(engine, 4096);
+               if (ret)
+                       return ret;
+       } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
+               ret = intel_init_pipe_control(engine, I830_WA_SIZE);
                if (ret)
                        return ret;
        }
@@ -2923,69 +2905,29 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
        engine->name = "bsd ring";
        engine->id = VCS;
        engine->exec_id = I915_EXEC_BSD;
+       engine->hw_id = 1;
 
-       engine->write_tail = ring_write_tail;
-       if (INTEL_INFO(dev)->gen >= 6) {
+       intel_ring_default_vfuncs(dev_priv, engine);
+
+       if (INTEL_GEN(dev_priv) >= 6) {
                engine->mmio_base = GEN6_BSD_RING_BASE;
                /* gen6 bsd needs a special wa for tail updates */
-               if (IS_GEN6(dev))
+               if (IS_GEN6(dev_priv))
                        engine->write_tail = gen6_bsd_ring_write_tail;
                engine->flush = gen6_bsd_ring_flush;
-               engine->add_request = gen6_add_request;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (INTEL_INFO(dev)->gen >= 8) {
+               if (INTEL_GEN(dev_priv) >= 8)
                        engine->irq_enable_mask =
                                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-                       engine->irq_get = gen8_ring_get_irq;
-                       engine->irq_put = gen8_ring_put_irq;
-                       engine->dispatch_execbuffer =
-                               gen8_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev)) {
-                               engine->semaphore.sync_to = gen8_ring_sync;
-                               engine->semaphore.signal = gen8_xcs_signal;
-                               GEN8_RING_SEMAPHORE_INIT(engine);
-                       }
-               } else {
+               else
                        engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-                       engine->irq_get = gen6_ring_get_irq;
-                       engine->irq_put = gen6_ring_put_irq;
-                       engine->dispatch_execbuffer =
-                               gen6_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev)) {
-                               engine->semaphore.sync_to = gen6_ring_sync;
-                               engine->semaphore.signal = gen6_signal;
-                               engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-                               engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-                               engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-                               engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-                               engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                               engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-                               engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-                               engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-                               engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-                               engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-                       }
-               }
        } else {
                engine->mmio_base = BSD_RING_BASE;
                engine->flush = bsd_ring_flush;
-               engine->add_request = i9xx_add_request;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (IS_GEN5(dev)) {
+               if (IS_GEN5(dev_priv))
                        engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-                       engine->irq_get = gen5_ring_get_irq;
-                       engine->irq_put = gen5_ring_put_irq;
-               } else {
+               else
                        engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-                       engine->irq_get = i9xx_ring_get_irq;
-                       engine->irq_put = i9xx_ring_put_irq;
-               }
-               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
        }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
@@ -3001,26 +2943,14 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
        engine->name = "bsd2 ring";
        engine->id = VCS2;
        engine->exec_id = I915_EXEC_BSD;
-
-       engine->write_tail = ring_write_tail;
+       engine->hw_id = 4;
        engine->mmio_base = GEN8_BSD2_RING_BASE;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_bsd_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
        engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-       engine->irq_get = gen8_ring_get_irq;
-       engine->irq_put = gen8_ring_put_irq;
-       engine->dispatch_execbuffer =
-                       gen8_ring_dispatch_execbuffer;
-       if (i915_semaphore_is_enabled(dev)) {
-               engine->semaphore.sync_to = gen8_ring_sync;
-               engine->semaphore.signal = gen8_xcs_signal;
-               GEN8_RING_SEMAPHORE_INIT(engine);
-       }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
@@ -3033,53 +2963,17 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
        engine->name = "blitter ring";
        engine->id = BCS;
        engine->exec_id = I915_EXEC_BLT;
-
+       engine->hw_id = 2;
        engine->mmio_base = BLT_RING_BASE;
-       engine->write_tail = ring_write_tail;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8)
                engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen8_ring_sync;
-                       engine->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
-       } else {
+       else
                engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-               engine->irq_get = gen6_ring_get_irq;
-               engine->irq_put = gen6_ring_put_irq;
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.signal = gen6_signal;
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between BCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between BCS and VCS2 later.
-                        */
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
-       }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
@@ -3092,47 +2986,21 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
        engine->name = "video enhancement ring";
        engine->id = VECS;
        engine->exec_id = I915_EXEC_VEBOX;
-
+       engine->hw_id = 3;
        engine->mmio_base = VEBOX_RING_BASE;
-       engine->write_tail = ring_write_tail;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8) {
                engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen8_ring_sync;
-                       engine->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
        } else {
                engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-               engine->irq_get = hsw_vebox_get_irq;
-               engine->irq_put = hsw_vebox_put_irq;
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       engine->semaphore.signal = gen6_signal;
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
+               engine->irq_enable = hsw_vebox_irq_enable;
+               engine->irq_disable = hsw_vebox_irq_disable;
        }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }