drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

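/*
 * Deferred-free work item: drops the scheduler's job reference once the
 * scheduler queues the job for freeing.
 */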
static void amdgpu_job_free_handler(struct work_struct *ws)
{
        struct amdgpu_job *job = container_of(ws, struct amdgpu_job,
                                              base.work_free_job);

        amd_sched_job_put(&job->base);
}

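/*
 * Timeout (TDR) handler: logs which ring stalled, along with the last
 * signaled and last emitted fence sequence numbers, then drops the job
 * reference held for the timeout work.
 */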
void amdgpu_job_timeout_func(struct work_struct *work)
{
        struct amdgpu_job *job = container_of(work, struct amdgpu_job,
                                              base.work_tdr.work);

        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
                  job->base.sched->name,
                  (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
                  job->ring->fence_drv.sync_seq);

        amd_sched_job_put(&job->base);
}

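/*
 * Allocate a job plus the IB array for num_ibs IBs in one allocation;
 * because the IBs live directly behind the job structure, a single
 * kfree() releases both.
 */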
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        (*job)->adev = adev;
        (*job)->vm = vm;
        /* the IB array starts right behind the job structure */
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;
        INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);

        amdgpu_sync_create(&(*job)->sync);

        return 0;
}

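/*
 * Convenience wrapper: allocate a single-IB job and back that IB with
 * size bytes of IB space.
 */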
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);

        return r;
}

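/*
 * Release everything a job owns. The suballocated IB memory is freed
 * against the scheduler fence when one exists, otherwise against the
 * hardware fence. The job structure itself is only freed here when it
 * never entered the scheduler; otherwise the scheduler's kref releases
 * it through amdgpu_job_free_func().
 */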
void amdgpu_job_free(struct amdgpu_job *job)
{
        unsigned i;
        struct fence *f;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->base : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
        fence_put(job->fence);

        amdgpu_bo_unref(&job->uf_bo);
        amdgpu_sync_free(&job->sync);

        if (!job->base.use_sched)
                kfree(job);
}

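/* kref release callback: final free once the last job reference is dropped */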
void amdgpu_job_free_func(struct kref *refcount)
{
        struct amdgpu_job *job = container_of(refcount, struct amdgpu_job,
                                              base.refcount);

        kfree(job);
}

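/*
 * Hand a job over to the GPU scheduler: initialize the embedded
 * scheduler job with the timeout and free callbacks above, return the
 * scheduler fence through f (with a reference held for the caller) and
 * push the job onto the entity's queue.
 */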
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f)
{
        struct fence *fence;
        int r;

        if (!f)
                return -EINVAL;

        job->ring = ring;

        r = amd_sched_job_init(&job->base, &ring->sched,
                               entity, amdgpu_job_timeout_func,
                               amdgpu_job_free_func, owner, &fence);
        if (r)
                return r;

        job->owner = owner;
        job->ctx = entity->fence_context;
        *f = fence_get(fence);
        amd_sched_entity_push_job(&job->base);

        return 0;
}

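/*
 * Typical caller flow, as a rough sketch. The caller-side names below
 * (ring, entity, owner) are illustrative and not defined in this file:
 *
 *      struct amdgpu_job *job;
 *      struct fence *f;
 *      int r;
 *
 *      r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 *      if (r)
 *              return r;
 *      ... emit commands into job->ibs[0] ...
 *      r = amdgpu_job_submit(job, ring, entity, owner, &f);
 *      if (r) {
 *              amdgpu_job_free(job);
 *              return r;
 *      }
 *      fence_put(f);
 */

/*
 * Scheduler callback: return the next fence this job still has to wait
 * for, grabbing a VM ID once all other dependencies have signaled.
 */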
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;

        struct fence *fence = amdgpu_sync_get_fence(&job->sync);

        if (fence == NULL && vm && !job->vm_id) {
                struct amdgpu_ring *ring = job->ring;
                int r;

                r = amdgpu_vm_grab_id(vm, ring, &job->sync,
                                      &job->base.s_fence->base,
                                      &job->vm_id, &job->vm_pd_addr);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync);
        }

        return fence;
}

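/*
 * Scheduler callback: wait for the job's remaining sync dependencies,
 * then submit its IBs to the ring. Returns the hardware fence, or NULL
 * on error.
 */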
static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
        struct fence *fence = NULL;
        struct amdgpu_job *job;
        int r;

        if (!sched_job) {
                DRM_ERROR("job is null\n");
                return NULL;
        }
        job = to_amdgpu_job(sched_job);

        r = amdgpu_sync_wait(&job->sync);
        if (r) {
                DRM_ERROR("failed to sync wait (%d)\n", r);
                return NULL;
        }

        trace_amdgpu_sched_run_job(job);
        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
                               job->sync.last_vm_update, job, &fence);
        if (r) {
                DRM_ERROR("Error scheduling IBs (%d)\n", r);
                goto err;
        }

err:
        /* fence is still NULL if submission failed; free the job either way */
        job->fence = fence;
        amdgpu_job_free(job);
        return fence;
}

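/* Backend hooks through which the amd scheduler drives amdgpu jobs */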
const struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .begin_job = amd_sched_job_begin,
        .finish_job = amd_sched_job_finish,
};