drm/amdgpu: fix and cleanup user fence handling v2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9ab2f08..2bbeeb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -87,33 +87,30 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 }
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
-                                     struct amdgpu_user_fence *uf,
-                                     struct drm_amdgpu_cs_chunk_fence *fence_data)
+                                     struct drm_amdgpu_cs_chunk_fence *data,
+                                     uint32_t *offset)
 {
        struct drm_gem_object *gobj;
-       uint32_t handle;
 
-       handle = fence_data->handle;
        gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
-                                    fence_data->handle);
+                                    data->handle);
        if (gobj == NULL)
                return -EINVAL;
 
-       uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-       uf->offset = fence_data->offset;
-
-       if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
-               drm_gem_object_unreference_unlocked(gobj);
-               return -EINVAL;
-       }
-
-       p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
+       p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
+       *offset = data->offset;
 
        drm_gem_object_unreference_unlocked(gobj);
+
+       if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+               amdgpu_bo_unref(&p->uf_entry.robj);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -124,8 +121,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        union drm_amdgpu_cs *cs = data;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
-       struct amdgpu_user_fence uf = {};
        unsigned size, num_ibs = 0;
+       uint32_t uf_offset = 0;
        int i;
        int ret;
 
@@ -200,7 +197,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                                goto free_partial_kdata;
                        }
 
-                       ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
+                       ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
+                                                        &uf_offset);
                        if (ret)
                                goto free_partial_kdata;
 
@@ -219,7 +217,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        if (ret)
                goto free_all_kdata;
 
-       p->job->uf = uf;
+       if (p->uf_entry.robj) {
+               p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
+               p->job->uf_offset = uf_offset;
+       }
 
        kfree(chunk_array);
        return 0;
@@ -377,7 +378,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-       if (p->job->uf.bo)
+       if (p->uf_entry.robj)
                list_add(&p->uf_entry.tv.head, &p->validated);
 
        if (need_mmap_lock)
@@ -760,17 +761,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                j++;
        }
 
-       /* wrap the last IB with user fence */
-       if (parser->job->uf.bo) {
-               struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
-
-               /* UVD & VCE fw doesn't support user fences */
-               if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
-                   parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
-                       return -EINVAL;
-
-               ib->user = &parser->job->uf;
-       }
+       /* UVD & VCE fw doesn't support user fences */
+       if (parser->job->uf_bo && (
+           parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
+           parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+               return -EINVAL;
 
        return 0;
 }
@@ -856,7 +851,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        job->ctx = entity->fence_context;
        p->fence = fence_get(fence);
        cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
-       job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+       job->uf_sequence = cs->out.handle;
 
        trace_amdgpu_cs_ioctl(job);
        amd_sched_entity_push_job(&job->base);
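
For reference, the chunk parsed by amdgpu_cs_user_fence_chunk() above is supplied by userspace as an AMDGPU_CHUNK_ID_FENCE entry of the CS ioctl. Below is a minimal sketch of how a client might fill in that chunk; struct drm_amdgpu_cs_chunk, struct drm_amdgpu_cs_chunk_fence and AMDGPU_CHUNK_ID_FENCE are the real amdgpu UAPI, while fill_user_fence_chunk(), fence_handle and the surrounding submission setup are illustrative assumptions only.

    /* Userspace sketch: describe a user fence BO for a CS submission.
     * Assumes fence_handle is a GEM handle to a CPU-visible BO; the IB
     * chunks, context id and BO list are set up elsewhere.
     */
    #include <stdint.h>
    #include <string.h>
    #include <drm/amdgpu_drm.h>   /* or libdrm's copy of amdgpu_drm.h */

    static void fill_user_fence_chunk(struct drm_amdgpu_cs_chunk *chunk,
                                      struct drm_amdgpu_cs_chunk_fence *fence_data,
                                      uint32_t fence_handle)
    {
            memset(fence_data, 0, sizeof(*fence_data));
            fence_data->handle = fence_handle;  /* GEM handle looked up by amdgpu_cs_user_fence_chunk() */
            fence_data->offset = 0;             /* offset into the BO where the sequence number is written */

            chunk->chunk_id   = AMDGPU_CHUNK_ID_FENCE;
            chunk->length_dw  = sizeof(*fence_data) / 4;
            chunk->chunk_data = (uintptr_t)fence_data;
    }

The kernel side then takes a reference on the BO through p->uf_entry, copies the offset into the job as uf_offset, and rejects the submission when the job targets a UVD or VCE ring, since that firmware cannot write user fences.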