drivers/gpu/drm/msm/msm_gem_submit.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

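/* Allocate the submit object plus a trailing array of nr bo-tracking slots
 * in a single allocation.  The size is driven by userspace (args->nr_bos),
 * so GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY makes an absurdly large
 * request fail quickly and quietly instead of triggering the OOM killer.
 * The ww acquire context (ticket) is initialized here and released later
 * in submit_cleanup().
 */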
static struct msm_gem_submit *submit_create(struct drm_device *dev,
                struct msm_gpu *gpu, int nr)
{
        struct msm_gem_submit *submit;
        int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));

        submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (submit) {
                submit->dev = dev;
                submit->gpu = gpu;

                /* initially, until copy_from_user() and bo lookup succeed: */
                submit->nr_bos = 0;
                submit->nr_cmds = 0;

                INIT_LIST_HEAD(&submit->bo_list);
                ww_acquire_init(&submit->ticket, &reservation_ww_class);
        }

        return submit;
}

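/* Copy the bo descriptor table in from userspace and resolve each GEM
 * handle to an object.  The whole lookup is done directly against
 * file->object_idr under a single table_lock instead of calling
 * drm_gem_object_lookup() per handle.  A reference is taken on every
 * object and it is added to submit->bo_list; nr_bos tracks how far we
 * got so the cleanup path only touches slots that were filled in.
 */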
static int submit_lookup_objects(struct msm_gem_submit *submit,
                struct drm_msm_gem_submit *args, struct drm_file *file)
{
        unsigned i;
        int ret = 0;

        spin_lock(&file->table_lock);

        for (i = 0; i < args->nr_bos; i++) {
                struct drm_msm_gem_submit_bo submit_bo;
                struct drm_gem_object *obj;
                struct msm_gem_object *msm_obj;
                void __user *userptr =
                        u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

                ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
                if (ret) {
                        ret = -EFAULT;
                        goto out_unlock;
                }

                if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
                        DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                        ret = -EINVAL;
                        goto out_unlock;
                }

                submit->bos[i].flags = submit_bo.flags;
                /* in validate_objects() we figure out if this is true: */
                submit->bos[i].iova  = submit_bo.presumed;

                /* normally use drm_gem_object_lookup(), but for bulk lookup
                 * all under single table_lock just hit object_idr directly:
                 */
                obj = idr_find(&file->object_idr, submit_bo.handle);
                if (!obj) {
                        DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
                        ret = -EINVAL;
                        goto out_unlock;
                }

                msm_obj = to_msm_bo(obj);

                if (!list_empty(&msm_obj->submit_entry)) {
                        DRM_ERROR("handle %u at index %u already on submit list\n",
                                        submit_bo.handle, i);
                        ret = -EINVAL;
                        goto out_unlock;
                }

                drm_gem_object_reference(obj);

                submit->bos[i].obj = msm_obj;

                list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
        }

out_unlock:
        submit->nr_bos = i;
        spin_unlock(&file->table_lock);

        return ret;
}

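/* Undo the per-bo state set up during validation: drop the iova pin and
 * the reservation (ww_mutex) lock if we hold them, and forget a presumed
 * iova that never turned out to be valid.  Only BO_LOCKED and BO_PINNED
 * are cleared here; BO_VALID is left alone.
 */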
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
        struct msm_gem_object *msm_obj = submit->bos[i].obj;

        if (submit->bos[i].flags & BO_PINNED)
                msm_gem_put_iova(&msm_obj->base, submit->gpu->id);

        if (submit->bos[i].flags & BO_LOCKED)
                ww_mutex_unlock(&msm_obj->resv->lock);

        if (!(submit->bos[i].flags & BO_VALID))
                submit->bos[i].iova = 0;

        submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

/* This is where we make sure all the bo's are reserved and pin'd: */
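/* Each bo's reservation lock is taken under the submit's ww acquire
 * context.  If ww_mutex_lock_interruptible() reports -EDEADLK we lost a
 * lock-ordering race with another submitter: back out (unlock/unpin
 * everything acquired so far), take the contended lock with the slow
 * path, remember it as slow_locked, and retry the whole loop.  While
 * pinning, submit->valid is cleared as soon as any bo's real iova
 * differs from the presumed iova userspace passed in, which is what
 * later decides whether reloc processing can be skipped.
 */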
static int submit_validate_objects(struct msm_gem_submit *submit)
{
        int contended, slow_locked = -1, i, ret = 0;

retry:
        submit->valid = true;

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                uint32_t iova;

                if (slow_locked == i)
                        slow_locked = -1;

                contended = i;

                if (!(submit->bos[i].flags & BO_LOCKED)) {
                        ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
                                        &submit->ticket);
                        if (ret)
                                goto fail;
                        submit->bos[i].flags |= BO_LOCKED;
                }

                /* if locking succeeded, pin bo: */
                ret = msm_gem_get_iova_locked(&msm_obj->base,
                                submit->gpu->id, &iova);

                /* this would break the logic in the fail path.. there is no
                 * reason for this to happen, but just to be on the safe side
                 * let's notice if this starts happening in the future:
                 */
                WARN_ON(ret == -EDEADLK);

                if (ret)
                        goto fail;

                submit->bos[i].flags |= BO_PINNED;

                if (iova == submit->bos[i].iova) {
                        submit->bos[i].flags |= BO_VALID;
                } else {
                        submit->bos[i].iova = iova;
                        submit->bos[i].flags &= ~BO_VALID;
                        submit->valid = false;
                }
        }

        ww_acquire_done(&submit->ticket);

        return 0;

fail:
        for (; i >= 0; i--)
                submit_unlock_unpin_bo(submit, i);

        if (slow_locked > 0)
                submit_unlock_unpin_bo(submit, slow_locked);

        if (ret == -EDEADLK) {
                struct msm_gem_object *msm_obj = submit->bos[contended].obj;
                /* we lost out in a seqno race, lock and retry.. */
                ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
                                &submit->ticket);
                if (!ret) {
                        submit->bos[contended].flags |= BO_LOCKED;
                        slow_locked = contended;
                        goto retry;
                }
        }

        return ret;
}

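/* Bounds-checked accessor for a looked-up bo: hand back the object, its
 * GPU iova, and/or whether the userspace-presumed iova is still valid.
 */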
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
                struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
        if (idx >= submit->nr_bos) {
                DRM_ERROR("invalid buffer index: %u (out of %u)\n",
                                idx, submit->nr_bos);
                return -EINVAL;
        }

        if (obj)
                *obj = submit->bos[idx].obj;
        if (iova)
                *iova = submit->bos[idx].iova;
        if (valid)
                *valid = !!(submit->bos[idx].flags & BO_VALID);

        return 0;
}

/* process the relocs and patch up the cmdstream as needed: */
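/* Each reloc entry names a target bo (reloc_idx), an offset into that bo
 * (reloc_offset), an optional shift and an OR mask.  The dword at
 * submit_offset/4 in the cmdstream bo is rewritten to:
 *
 *     ((iova + reloc_offset) shifted left, or right if shift < 0) | or
 *
 * e.g. (illustrative numbers only) a target bo pinned at iova 0x00100000
 * with reloc_offset 0x40, shift 0 and or 0 patches the dword to 0x00100040.
 * Relocs must be sorted by ascending submit_offset, and relocs whose
 * target bo still has a valid presumed iova (BO_VALID) are skipped.
 */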
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
                uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
        uint32_t i, last_offset = 0;
        uint32_t *ptr;
        int ret;

        if (offset % 4) {
                DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
                return -EINVAL;
        }

        /* For now, just map the entire thing.  Eventually we probably want
         * to do it page-by-page, w/ kmap() if not vmap()d..
         */
        ptr = msm_gem_vaddr_locked(&obj->base);

        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                DBG("failed to map: %d", ret);
                return ret;
        }

        for (i = 0; i < nr_relocs; i++) {
                struct drm_msm_gem_submit_reloc submit_reloc;
                void __user *userptr =
                        u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
                uint32_t iova, off;
                bool valid;

                ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
                if (ret)
                        return -EFAULT;

                if (submit_reloc.submit_offset % 4) {
                        DRM_ERROR("non-aligned reloc offset: %u\n",
                                        submit_reloc.submit_offset);
                        return -EINVAL;
                }

                /* offset in dwords: */
                off = submit_reloc.submit_offset / 4;

                if ((off >= (obj->base.size / 4)) ||
                                (off < last_offset)) {
                        DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
                        return -EINVAL;
                }

                ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
                if (ret)
                        return ret;

                if (valid)
                        continue;

                iova += submit_reloc.reloc_offset;

                if (submit_reloc.shift < 0)
                        iova >>= -submit_reloc.shift;
                else
                        iova <<= submit_reloc.shift;

                ptr[off] = iova | submit_reloc.or;

                last_offset = off;
        }

        return 0;
}

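/* Unwind everything submit_lookup_objects()/submit_validate_objects()
 * set up: unlock and unpin each bo, take it off the submit list, drop
 * the reference we hold on it, and release the ww acquire context.
 * (The fail argument is currently unused.)
 */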
static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
        unsigned i;

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                submit_unlock_unpin_bo(submit, i);
                list_del_init(&msm_obj->submit_entry);
                drm_gem_object_unreference(&msm_obj->base);
        }

        ww_acquire_fini(&submit->ticket);
}

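/* DRM_MSM_GEM_SUBMIT ioctl: build a submit from the userspace args,
 * look up and validate the bos, copy in and sanity-check each cmd
 * buffer, patch relocs where the presumed iovas were stale, then hand
 * the whole thing to msm_gpu_submit() and report the fence back to
 * userspace.  All of it runs under dev->struct_mutex.
 */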
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_gem_submit *args = data;
        struct msm_file_private *ctx = file->driver_priv;
        struct msm_gem_submit *submit;
        struct msm_gpu *gpu = priv->gpu;
        unsigned i;
        int ret;

        if (!gpu)
                return -ENXIO;

        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
        if (args->pipe != MSM_PIPE_3D0)
                return -EINVAL;

        if (args->nr_cmds > MAX_CMDS)
                return -EINVAL;

        submit = submit_create(dev, gpu, args->nr_bos);
        if (!submit)
                return -ENOMEM;

        mutex_lock(&dev->struct_mutex);

        ret = submit_lookup_objects(submit, args, file);
        if (ret)
                goto out;

        ret = submit_validate_objects(submit);
        if (ret)
                goto out;

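        /* Copy in each cmd descriptor from userspace, validate its type,
         * alignment and bounds against the backing bo, record it in
         * submit->cmd[], and apply relocs unless every bo's presumed
         * iova turned out to be valid (submit->valid).
         */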
        for (i = 0; i < args->nr_cmds; i++) {
                struct drm_msm_gem_submit_cmd submit_cmd;
                void __user *userptr =
                        u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
                struct msm_gem_object *msm_obj;
                uint32_t iova;

                ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
                if (ret) {
                        ret = -EFAULT;
                        goto out;
                }

                /* validate input from userspace: */
                switch (submit_cmd.type) {
                case MSM_SUBMIT_CMD_BUF:
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        break;
                default:
                        DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
                        ret = -EINVAL;
                        goto out;
                }

                ret = submit_bo(submit, submit_cmd.submit_idx,
                                &msm_obj, &iova, NULL);
                if (ret)
                        goto out;

                if (submit_cmd.size % 4) {
                        DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
                                        submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
                }

                if ((submit_cmd.size + submit_cmd.submit_offset) >=
                                msm_obj->base.size) {
                        DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
                }

                submit->cmd[i].type = submit_cmd.type;
                submit->cmd[i].size = submit_cmd.size / 4;
                submit->cmd[i].iova = iova + submit_cmd.submit_offset;
                submit->cmd[i].idx  = submit_cmd.submit_idx;

                if (submit->valid)
                        continue;

                ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
                                submit_cmd.nr_relocs, submit_cmd.relocs);
                if (ret)
                        goto out;
        }

        submit->nr_cmds = i;

        ret = msm_gpu_submit(gpu, submit, ctx);

        args->fence = submit->fence;

out:
        submit_cleanup(submit, !!ret);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}