Merge branch 'topic/ppgtt' into drm-intel-next-queued
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d269ecf..bd5f4e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -91,6 +91,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
               struct i915_address_space *vm,
               struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct list_head objects;
        int i, ret;
@@ -125,6 +126,20 @@ eb_lookup_vmas(struct eb_vmas *eb,
        i = 0;
        while (!list_empty(&objects)) {
                struct i915_vma *vma;
+               struct i915_address_space *bind_vm = vm;
+
+               if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
+                   USES_FULL_PPGTT(vm->dev)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               /* If we have secure dispatch, or userspace assures us that
+                * it knows what it's doing, use the GGTT VM.
+                */
+               if ((args->flags & I915_EXEC_SECURE) &&
+                   (i == (args->buffer_count - 1)))
+                       bind_vm = &dev_priv->gtt.base;
 
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
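
The per-buffer VM choice added above reduces to a small rule: under full PPGTT, a buffer flagged EXEC_OBJECT_NEEDS_GTT is rejected outright, and only the final buffer of a secure batch (the batch buffer itself) is redirected into the global GTT. A compilable sketch of that rule with stand-in types; the two flag values are copied from include/uapi/drm/i915_drm.h, everything else is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXEC_OBJECT_NEEDS_GTT	(1 << 1)	/* from i915_drm.h */
#define I915_EXEC_SECURE	(1 << 9)	/* from i915_drm.h */

struct vm { const char *name; };	/* stands in for struct i915_address_space */

/* Returns the VM the i-th buffer should be bound into, or NULL for the
 * -EINVAL case the driver takes. */
static struct vm *pick_bind_vm(struct vm *ppgtt, struct vm *ggtt,
			       bool full_ppgtt, uint32_t obj_flags,
			       uint32_t exec_flags, int i, int buffer_count)
{
	if ((obj_flags & EXEC_OBJECT_NEEDS_GTT) && full_ppgtt)
		return NULL;

	if ((exec_flags & I915_EXEC_SECURE) && i == buffer_count - 1)
		return ggtt;

	return ppgtt;
}

int main(void)
{
	struct vm ppgtt = { "ppgtt" }, ggtt = { "ggtt" };
	struct vm *vm = pick_bind_vm(&ppgtt, &ggtt, true, 0,
				     I915_EXEC_SECURE, 2, 3);

	printf("last buffer of a secure batch binds into: %s\n",
	       vm ? vm->name : "(rejected)");
	return 0;
}
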
@@ -138,7 +153,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
-               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+               vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
@@ -217,7 +232,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
                i915_gem_object_unpin_fence(obj);
 
        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-               i915_gem_object_unpin(obj);
+               vma->pin_count--;
 
        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
@@ -327,8 +342,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
-                                  struct drm_i915_gem_relocation_entry *reloc,
-                                  struct i915_address_space *vm)
+                                  struct drm_i915_gem_relocation_entry *reloc)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
@@ -352,8 +366,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
-               i915_gem_gtt_bind_object(target_i915_obj,
-                                        target_i915_obj->cache_level);
+               struct i915_vma *vma =
+                       list_first_entry(&target_i915_obj->vma_list,
+                                        typeof(*vma), vma_link);
+               vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
        }
 
        /* Validate that the target is in a valid r/w GPU domain */
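
The call above is the pattern this merge establishes throughout the file: the old object-level helpers (i915_gem_gtt_bind_object(), i915_ppgtt_bind_object()) give way to a bind callback carried by each VMA, so call sites no longer care which address space backs the mapping. A minimal userspace model of that dispatch; the names and bodies are illustrative, with GLOBAL_BIND standing in for the driver's flag of the same name:

#include <stdint.h>
#include <stdio.h>

#define GLOBAL_BIND (1 << 0)	/* models the driver's GLOBAL_BIND flag */

enum cache_level { CACHE_NONE, CACHE_LLC };

/* Each mapping carries its own bind callback, filled in by whichever
 * address space (GGTT or PPGTT) created the VMA. */
struct vma {
	void (*bind_vma)(struct vma *vma, enum cache_level level,
			 uint32_t flags);
};

static void ggtt_bind_vma(struct vma *vma, enum cache_level level,
			  uint32_t flags)
{
	(void)vma;
	printf("GGTT bind, cache=%d%s\n", level,
	       (flags & GLOBAL_BIND) ? " (GLOBAL_BIND)" : "");
}

static void ppgtt_bind_vma(struct vma *vma, enum cache_level level,
			   uint32_t flags)
{
	(void)vma;
	(void)flags;
	printf("PPGTT bind, cache=%d\n", level);
}

int main(void)
{
	struct vma g = { ggtt_bind_vma }, p = { ppgtt_bind_vma };

	/* a caller such as the relocation code above stays VM-agnostic */
	g.bind_vma(&g, CACHE_LLC, GLOBAL_BIND);
	p.bind_vma(&p, CACHE_NONE, 0);
	return 0;
}
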
@@ -451,8 +467,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                do {
                        u64 offset = r->presumed_offset;
 
-                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
-                                                                vma->vm);
+                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
                        if (ret)
                                return ret;
 
@@ -481,8 +496,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
        int i, ret;
 
        for (i = 0; i < entry->relocation_count; i++) {
-               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
-                                                        vma->vm);
+               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
                if (ret)
                        return ret;
        }
@@ -527,11 +541,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                                struct intel_ring_buffer *ring,
                                bool *need_reloc)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
-       struct drm_i915_gem_object *obj = vma->obj;
+       u32 flags = ((entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+                    !vma->obj->has_global_gtt_mapping) ? GLOBAL_BIND : 0;
        int ret;
 
        need_fence =
@@ -560,14 +575,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                }
        }
 
-       /* Ensure ppgtt mapping exists if needed */
-       if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                      obj, obj->cache_level);
-
-               obj->has_aliasing_ppgtt_mapping = 1;
-       }
-
        if (entry->offset != vma->node.start) {
                entry->offset = vma->node.start;
                *need_reloc = true;
@@ -578,9 +585,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }
 
-       if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
-           !obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+       vma->bind_vma(vma, obj->cache_level, flags);
 
        return 0;
 }
@@ -900,22 +905,27 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
        return 0;
 }
 
-static int
+static struct i915_hw_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-                         const u32 ctx_id)
+                         struct intel_ring_buffer *ring, const u32 ctx_id)
 {
+       struct i915_hw_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;
 
-       hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
-       if (IS_ERR(hs))
-               return PTR_ERR(hs);
+       if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+               return ERR_PTR(-EINVAL);
+
+       ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+       if (IS_ERR(ctx))
+               return ctx;
 
+       hs = &ctx->hang_stats;
        if (hs->banned) {
                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
-               return -EIO;
+               return ERR_PTR(-EIO);
        }
 
-       return 0;
+       return ctx;
 }
 
 static void
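
i915_gem_validate_context() now hands back the context itself rather than an int, using the kernel's ERR_PTR convention from include/linux/err.h so a single return value carries either a valid pointer or a negative errno. A userspace model of that encoding, with constants mirroring err.h:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors include/linux/err.h: errnos up to 4095 are encoded into the
 * top, never-mappable page of the pointer range. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *validate(int banned)
{
	static int ctx = 42;	/* stands in for a real context object */

	return banned ? ERR_PTR(-EIO) : (void *)&ctx;
}

int main(void)
{
	void *ctx = validate(1);

	if (IS_ERR(ctx))
		printf("submit rejected: errno %ld\n", PTR_ERR(ctx)); /* -5 */
	return 0;
}
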
@@ -939,7 +949,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
-                       if (obj->pin_count) /* check for potential scanout */
+                       /* check for potential scanout */
+                       if (i915_gem_obj_ggtt_bound(obj) &&
+                           i915_gem_obj_to_ggtt(obj)->pin_count)
                                intel_mark_fb_busy(obj, ring);
                }
 
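
The scanout check changes because pinning is now tracked per VMA rather than per object: a scanout buffer is pinned specifically in the global GTT, so it is the object's GGTT VMA that gets interrogated. A toy model of the per-VMA accounting, where the lookup loop plays the role of i915_gem_obj_to_ggtt():

#include <stdio.h>

struct vm { const char *name; };

/* An object can be mapped into several address spaces; each mapping
 * (VMA) now carries its own pin count. */
struct vma {
	const struct vm *vm;
	int pin_count;
};

static const struct vm ggtt = { "ggtt" }, ppgtt = { "ppgtt" };

/* plays the role of i915_gem_obj_to_ggtt(): find the global-GTT VMA */
static struct vma *to_ggtt(struct vma *vmas, int n)
{
	for (int i = 0; i < n; i++)
		if (vmas[i].vm == &ggtt)
			return &vmas[i];
	return NULL;
}

int main(void)
{
	struct vma vmas[] = { { &ggtt, 1 }, { &ppgtt, 0 } };
	struct vma *g = to_ggtt(vmas, 2);

	/* the heuristic above: written to and pinned in the global GTT
	 * means the buffer may be a scanout */
	if (g && g->pin_count)
		printf("possible scanout; mark the framebuffer busy\n");
	return 0;
}
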
@@ -989,16 +1001,17 @@ static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
-                      struct drm_i915_gem_exec_object2 *exec,
-                      struct i915_address_space *vm)
+                      struct drm_i915_gem_exec_object2 *exec)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
+       struct i915_hw_context *ctx;
+       struct i915_address_space *vm;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-       u32 exec_start, exec_len;
+       u32 exec_start = args->batch_start_offset, exec_len;
        u32 mask, flags;
        int ret, mode, i;
        bool need_relocs;
@@ -1020,41 +1033,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (args->flags & I915_EXEC_IS_PINNED)
                flags |= I915_DISPATCH_PINNED;
 
-       switch (args->flags & I915_EXEC_RING_MASK) {
-       case I915_EXEC_DEFAULT:
-       case I915_EXEC_RENDER:
-               ring = &dev_priv->ring[RCS];
-               break;
-       case I915_EXEC_BSD:
-               ring = &dev_priv->ring[VCS];
-               if (ctx_id != DEFAULT_CONTEXT_ID) {
-                       DRM_DEBUG("Ring %s doesn't support contexts\n",
-                                 ring->name);
-                       return -EPERM;
-               }
-               break;
-       case I915_EXEC_BLT:
-               ring = &dev_priv->ring[BCS];
-               if (ctx_id != DEFAULT_CONTEXT_ID) {
-                       DRM_DEBUG("Ring %s doesn't support contexts\n",
-                                 ring->name);
-                       return -EPERM;
-               }
-               break;
-       case I915_EXEC_VEBOX:
-               ring = &dev_priv->ring[VECS];
-               if (ctx_id != DEFAULT_CONTEXT_ID) {
-                       DRM_DEBUG("Ring %s doesn't support contexts\n",
-                                 ring->name);
-                       return -EPERM;
-               }
-               break;
-
-       default:
+       if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }
+
+       if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
+               ring = &dev_priv->ring[RCS];
+       else
+               ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+
        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
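
The old switch collapses into array arithmetic because the uapi ring values are contiguous, with I915_EXEC_DEFAULT as the only special case; the per-ring context restriction the switch used to enforce moves into i915_gem_validate_context() above. A standalone check of the mapping, with the enum values copied from include/uapi/drm/i915_drm.h and ring ids RCS=0, VCS=1, BCS=2, VECS=3:

#include <stdio.h>

/* values from include/uapi/drm/i915_drm.h */
enum { I915_EXEC_DEFAULT, I915_EXEC_RENDER, I915_EXEC_BSD,
       I915_EXEC_BLT, I915_EXEC_VEBOX };

static const char * const ring_name[] = { "RCS", "VCS", "BCS", "VECS" };

int main(void)
{
	for (int f = I915_EXEC_DEFAULT; f <= I915_EXEC_VEBOX; f++) {
		int idx = (f == I915_EXEC_DEFAULT) ? 0 : f - 1;

		printf("exec flag %d -> dev_priv->ring[%d] (%s)\n",
		       f, idx, ring_name[idx]);
	}
	return 0;
}
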
@@ -1136,11 +1125,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
-       ret = i915_gem_validate_context(dev, file, ctx_id);
-       if (ret) {
+       ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+       if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
+               ret = PTR_ERR(ctx);
                goto pre_mutex_err;
-       }
+       }
+
+       i915_gem_context_reference(ctx);
+
+       vm = ctx->vm;
+       if (!USES_FULL_PPGTT(dev))
+               vm = &dev_priv->gtt.base;
 
        eb = eb_create(args);
        if (eb == NULL) {
@@ -1187,14 +1183,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
-       if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+       if (flags & I915_DISPATCH_SECURE &&
+           !batch_obj->has_global_gtt_mapping) {
+               /* When we have multiple VMs, we'll need to make sure that we
+                * allocate space first */
+               struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
+               BUG_ON(!vma);
+               vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+       }
+
+       if (flags & I915_DISPATCH_SECURE)
+               exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+       else
+               exec_start += i915_gem_obj_offset(batch_obj, vm);
 
        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;
 
-       ret = i915_switch_context(ring, file, ctx_id);
+       ret = i915_switch_context(ring, file, ctx);
        if (ret)
                goto err;
 
@@ -1219,8 +1226,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
        }
 
-       exec_start = i915_gem_obj_offset(batch_obj, vm) +
-               args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
@@ -1249,6 +1254,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
+       /* the request owns the ref now */
+       i915_gem_context_unreference(ctx);
        eb_destroy(eb);
 
        mutex_unlock(&dev->struct_mutex);
@@ -1270,7 +1277,6 @@ int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1326,8 +1332,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec2.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(exec2, 0);
 
-       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
-                                    &dev_priv->gtt.base);
+       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
@@ -1353,7 +1358,6 @@ int
 i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;
@@ -1384,8 +1388,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                return -EFAULT;
        }
 
-       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
-                                    &dev_priv->gtt.base);
+       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user(to_user_ptr(args->buffers_ptr),