drm/amdgpu: move the context from the IBs into the job
author		Christian König <christian.koenig@amd.com>
		Fri, 6 May 2016 13:57:42 +0000 (15:57 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
		Wed, 11 May 2016 17:30:31 +0000 (13:30 -0400)
We only have one context for all IBs of a submission, so store it once in the job instead of in every IB.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index db87edc..9b55ad3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -743,7 +743,6 @@ struct amdgpu_ib {
        struct amdgpu_user_fence        *user;
        unsigned                        vm_id;
        uint64_t                        vm_pd_addr;
-       uint64_t                        ctx;
        uint32_t                        gds_base, gds_size;
        uint32_t                        gws_base, gws_size;
        uint32_t                        oa_base, oa_size;
@@ -1262,6 +1261,7 @@ struct amdgpu_job {
        struct fence            *fence; /* the hw fence */
        uint32_t                num_ibs;
        void                    *owner;
+       uint64_t                ctx;
        struct amdgpu_user_fence uf;
 };
 #define to_amdgpu_job(sched_job)               \
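
Editor's note: as a minimal standalone sketch of the header change above (hypothetical model_* types, not the real amdgpu definitions), the context identifier that used to be duplicated in every IB now lives once in the job that owns those IBs:

#include <stdint.h>

/* Hypothetical stand-ins for amdgpu_ib/amdgpu_job, reduced to the
 * fields relevant here. Before the patch each IB carried its own
 * ctx; afterwards the job stores one ctx shared by all of its IBs. */
struct model_ib {
	uint32_t length_dw;
	uint32_t flags;
	/* uint64_t ctx;  -- removed by this patch */
};

struct model_job {
	struct model_ib *ibs;	/* all IBs of one submission */
	uint32_t num_ibs;
	uint64_t ctx;		/* added: one context for the whole job */
};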
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 87ec113..2895d63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -741,7 +741,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
                ib->length_dw = chunk_ib->ib_bytes / 4;
                ib->flags = chunk_ib->flags;
-               ib->ctx = parser->ctx->rings[ring->idx].entity.fence_context;
                j++;
        }
 
@@ -840,6 +839,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
 {
        struct amdgpu_ring *ring = p->job->ring;
+       struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
        struct fence *fence;
        struct amdgpu_job *job;
        int r;
@@ -848,16 +848,16 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        p->job = NULL;
 
        r = amd_sched_job_init(&job->base, &ring->sched,
-                                               &p->ctx->rings[ring->idx].entity,
-                                               amdgpu_job_timeout_func,
-                                               amdgpu_job_free_func,
-                                               p->filp, &fence);
+                              entity, amdgpu_job_timeout_func,
+                              amdgpu_job_free_func,
+                              p->filp, &fence);
        if (r) {
                amdgpu_job_free(job);
                return r;
        }
 
        job->owner = p->filp;
+       job->ctx = entity->fence_context;
        p->fence = fence_get(fence);
        cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
        job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
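
Editor's note: the command-submission path now records the context exactly once, taken from the scheduler entity the job is pushed to. A rough standalone sketch of that step, using hypothetical model_* types rather than the kernel API:

#include <stdint.h>

struct model_entity {
	uint64_t fence_context;	/* unique per scheduler entity */
};

struct model_job {
	void	*owner;
	uint64_t ctx;
};

/* Mirrors the intent of amdgpu_cs_submit() after amd_sched_job_init():
 * remember the owner and the entity's fence context in the job, so the
 * IBs no longer need a per-IB copy. */
static void model_cs_submit(struct model_job *job,
			    const struct model_entity *entity,
			    void *filp)
{
	job->owner = filp;
	job->ctx = entity->fence_context;
}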
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 88b8fda..dacbd2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -121,18 +121,26 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
-       struct fence *hwf;
-       struct amdgpu_vm *vm = NULL;
-       unsigned i, patch_offset = ~0;
        bool skip_preamble, need_ctx_switch;
+       unsigned patch_offset = ~0;
+       struct amdgpu_vm *vm;
+       struct fence *hwf;
+       uint64_t ctx;
 
+       unsigned i;
        int r = 0;
 
        if (num_ibs == 0)
                return -EINVAL;
 
-       if (job) /* for domain0 job like ring test, ibs->job is not assigned */
+       /* ring tests don't use a job */
+       if (job) {
                vm = job->vm;
+               ctx = job->ctx;
+       } else {
+               vm = NULL;
+               ctx = 0;
+       }
 
        if (!ring->ready) {
                dev_err(adev->dev, "couldn't schedule ib\n");
@@ -170,8 +178,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;
 
-       skip_preamble = ring->current_ctx == ib->ctx;
-       need_ctx_switch = ring->current_ctx != ib->ctx;
+       skip_preamble = ring->current_ctx == ctx;
+       need_ctx_switch = ring->current_ctx != ctx;
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
@@ -209,7 +217,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-       ring->current_ctx = ibs->ctx;
+       ring->current_ctx = ctx;
        amdgpu_ring_commit(ring);
        return 0;
 }
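
Editor's note: the scheduling path now derives the skip_preamble and need_ctx_switch decisions from a single per-job context instead of per-IB values. A compilable toy model of just that context comparison (hypothetical names; ctx 0 stands in for the job-less ring-test case):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_ring {
	uint64_t current_ctx;	/* context of the last scheduled job */
};

struct model_job {
	uint64_t ctx;
};

/* Models the intent of amdgpu_ib_schedule(): ring tests pass no job,
 * so they use ctx 0; otherwise the job supplies the context. The
 * preamble IB can be skipped only when the context did not change. */
static void model_ib_schedule(struct model_ring *ring,
			      const struct model_job *job)
{
	uint64_t ctx = job ? job->ctx : 0;
	bool skip_preamble = (ring->current_ctx == ctx);
	bool need_ctx_switch = (ring->current_ctx != ctx);

	printf("skip_preamble=%d need_ctx_switch=%d\n",
	       skip_preamble, need_ctx_switch);

	ring->current_ctx = ctx;	/* remember for the next submission */
}

int main(void)
{
	struct model_ring ring = { .current_ctx = 0 };
	struct model_job job = { .ctx = 42 };

	model_ib_schedule(&ring, &job);	/* context switch: 0 -> 42 */
	model_ib_schedule(&ring, &job);	/* same context: preamble skipped */
	return 0;
}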
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 917c6f3..a0961f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -122,14 +122,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                return -EINVAL;
 
        r = amd_sched_job_init(&job->base, &ring->sched,
-                                                       entity,
-                                                       amdgpu_job_timeout_func,
-                                                       amdgpu_job_free_func,
-                                                       owner, &fence);
+                              entity, amdgpu_job_timeout_func,
+                              amdgpu_job_free_func, owner, &fence);
        if (r)
                return r;
 
        job->owner = owner;
+       job->ctx = entity->fence_context;
        *f = fence_get(fence);
        amd_sched_entity_push_job(&job->base);
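
Editor's note: amdgpu_job_submit() now mirrors the CS path above, taking job->ctx from the entity's fence context right after amd_sched_job_init(). As a result, jobs queued through the same entity share one context while jobs from different entities do not, which is exactly the property the skip_preamble/need_ctx_switch check relies on. A small illustrative model (hypothetical names, contexts handed out by a toy counter rather than the kernel's fence context allocator):

#include <assert.h>
#include <stdint.h>

struct model_entity { uint64_t fence_context; };
struct model_job    { uint64_t ctx; };

/* Toy replacement for fence context allocation: each entity gets a
 * unique, non-zero identifier once, at creation time. */
static uint64_t model_alloc_context(void)
{
	static uint64_t counter;
	return ++counter;
}

int main(void)
{
	struct model_entity gfx = { .fence_context = model_alloc_context() };
	struct model_entity compute = { .fence_context = model_alloc_context() };

	struct model_job a = { .ctx = gfx.fence_context };
	struct model_job b = { .ctx = gfx.fence_context };
	struct model_job c = { .ctx = compute.fence_context };

	assert(a.ctx == b.ctx);	/* same entity: preamble can be skipped */
	assert(a.ctx != c.ctx);	/* different entity: context switch needed */
	return 0;
}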