drm/amdgpu: use per VM entity for page table updates (v2)
author Christian König <christian.koenig@amd.com>
Mon, 1 Feb 2016 11:53:58 +0000 (12:53 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 12 Feb 2016 20:35:16 +0000 (15:35 -0500)
Updates from different VMs can be processed independently.

v2: agd: rebase on upstream

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
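
The idea can be illustrated outside the driver. Below is a minimal, stand-alone sketch (hypothetical structures and names, not the real amdgpu or amd_sched API) of why a per-VM entity lets the scheduler pick up page table updates from different VMs independently, instead of funnelling them all through the single per-ring kernel entity:

#include <stdio.h>

#define MAX_JOBS 8

struct entity {                    /* stand-in for amd_sched_entity */
	const char *vm_name;
	int jobs[MAX_JOBS];        /* pending page table update jobs */
	int head, tail;
};

static void entity_push(struct entity *e, int job)
{
	e->jobs[e->tail++] = job;
}

static int entity_pop(struct entity *e, int *job)
{
	if (e->head == e->tail)
		return 0;
	*job = e->jobs[e->head++];
	return 1;
}

int main(void)
{
	/* two VMs, each with its own queue instead of one shared kernel queue */
	struct entity vm_a = { .vm_name = "vm A" };
	struct entity vm_b = { .vm_name = "vm B" };
	struct entity *rr[2] = { &vm_a, &vm_b };
	int i, job, progress;

	for (i = 0; i < 4; i++)
		entity_push(&vm_a, i);   /* vm A has a backlog of updates */
	entity_push(&vm_b, 100);         /* vm B has a single update      */

	/* round-robin over the entities: vm B's update runs after just one
	 * vm A job instead of waiting behind vm A's whole backlog */
	do {
		progress = 0;
		for (i = 0; i < 2; i++) {
			if (entity_pop(rr[i], &job)) {
				printf("run %s job %d\n", rr[i]->vm_name, job);
				progress = 1;
			}
		}
	} while (progress);

	return 0;
}

In the patch itself this role is played by the new struct amd_sched_entity embedded in struct amdgpu_vm: it is initialized against the kernel run queue in amdgpu_vm_init(), torn down in amdgpu_vm_fini(), and passed to amdgpu_job_submit() for all page table work.
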
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 99e660f..5947a95 100644
@@ -800,7 +800,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     void *owner, struct fence **f);
+                     struct amd_sched_entity *entity, void *owner,
+                     struct fence **f);
 
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
@@ -917,6 +918,9 @@ struct amdgpu_vm {
 
        /* protecting freed */
        spinlock_t              freed_lock;
+
+       /* Scheduler entity for page table updates */
+       struct amd_sched_entity entity;
 };
 
 struct amdgpu_vm_manager_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0f6719e..97db6be 100644
@@ -80,13 +80,17 @@ void amdgpu_job_free(struct amdgpu_job *job)
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     void *owner, struct fence **f)
+                     struct amd_sched_entity *entity, void *owner,
+                     struct fence **f)
 {
        struct amdgpu_device *adev = job->adev;
 
+       if (!entity)
+               entity = &adev->kernel_ctx.rings[ring->idx].entity;
+
        job->ring = ring;
        job->base.sched = &ring->sched;
-       job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+       job->base.s_entity = entity;
        job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
        if (!job->base.s_fence)
                return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e47d518..3deb7d3 100644
@@ -1053,7 +1053,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+       r = amdgpu_job_submit(job, ring, NULL, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c536630..f428343 100644
@@ -880,7 +880,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring,
+               r = amdgpu_job_submit(job, ring, NULL,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index fb2ce3e..8a31193 100644
@@ -481,7 +481,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring,
+               r = amdgpu_job_submit(job, ring, NULL,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b291b1a..5e38b34 100644
@@ -322,6 +322,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
  * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+                             struct amdgpu_vm *vm,
                              struct amdgpu_bo *bo)
 {
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
@@ -351,7 +352,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
        WARN_ON(job->ibs[0].length_dw > 64);
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+       r = amdgpu_job_submit(job, ring, &vm->entity,
+                             AMDGPU_FENCE_OWNER_VM, &fence);
        if (r)
                goto error_free;
 
@@ -476,7 +478,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM);
                WARN_ON(ib->length_dw > ndw);
-               r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+               r = amdgpu_job_submit(job, ring, &vm->entity,
+                                     AMDGPU_FENCE_OWNER_VM, &fence);
                if (r)
                        goto error_free;
 
@@ -729,7 +732,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        amdgpu_ring_pad_ib(ring, ib);
        WARN_ON(ib->length_dw > ndw);
-       r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
+       r = amdgpu_job_submit(job, ring, &vm->entity,
+                             AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error_free;
 
@@ -1104,7 +1108,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                 */
                pt->parent = amdgpu_bo_ref(vm->page_directory);
 
-               r = amdgpu_vm_clear_bo(adev, pt);
+               r = amdgpu_vm_clear_bo(adev, vm, pt);
                if (r) {
                        amdgpu_bo_unref(&pt);
                        goto error_free;
@@ -1265,9 +1269,11 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+       struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT * 8);
        unsigned pd_size, pd_entries;
+       struct amd_sched_rq *rq;
        int i, r;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1291,6 +1297,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                return -ENOMEM;
        }
 
+       /* create scheduler entity for page table updates */
+       rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+       r = amd_sched_entity_init(&ring->sched, &vm->entity,
+                                 rq, amdgpu_sched_jobs);
+       if (r)
+               return r;
+
        vm->page_directory_fence = NULL;
 
        r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1298,22 +1311,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                             AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                             NULL, NULL, &vm->page_directory);
        if (r)
-               return r;
+               goto error_free_sched_entity;
+
        r = amdgpu_bo_reserve(vm->page_directory, false);
-       if (r) {
-               amdgpu_bo_unref(&vm->page_directory);
-               vm->page_directory = NULL;
-               return r;
-       }
-       r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+       if (r)
+               goto error_free_page_directory;
+
+       r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
        amdgpu_bo_unreserve(vm->page_directory);
-       if (r) {
-               amdgpu_bo_unref(&vm->page_directory);
-               vm->page_directory = NULL;
-               return r;
-       }
+       if (r)
+               goto error_free_page_directory;
 
        return 0;
+
+error_free_page_directory:
+       amdgpu_bo_unref(&vm->page_directory);
+       vm->page_directory = NULL;
+
+error_free_sched_entity:
+       amd_sched_entity_fini(&ring->sched, &vm->entity);
+
+       return r;
 }
 
 /**
@@ -1327,9 +1345,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+       struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        int i;
 
+       amd_sched_entity_fini(&ring->sched, &vm->entity);
+
        if (!RB_EMPTY_ROOT(&vm->va)) {
                dev_err(adev->dev, "still active bo inside vm\n");
        }