/* drivers/gpu/drm/nouveau/nouveau_gem.c */
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

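/*
 * Final-unreference callback for a nouveau GEM object.  The device is
 * woken because tearing down the backing TTM object may need to touch
 * the hardware; prime imports additionally drop their dma-buf
 * attachment here.
 */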
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        struct device *dev = drm->dev->dev;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0 && ret != -EACCES))
                return;

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        drm_gem_object_release(gem);

        /* reset filp so nouveau_bo_del_ttm() can test for it */
        gem->filp = NULL;
        ttm_bo_unref(&bo);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}

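/*
 * Called when a client opens a handle to the object.  On chips with
 * per-client address spaces (cli->vm), look up or create the client's
 * mapping of the buffer and take a reference on it.
 */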
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvkm_vma *vma;
        struct device *dev = drm->dev->dev;
        int ret;

        if (!cli->vm)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        vma = nouveau_bo_vma_find(nvbo, cli->vm);
        if (!vma) {
                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
                if (!vma) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = pm_runtime_get_sync(dev);
                if (ret < 0 && ret != -EACCES) {
                        /* pm_runtime_get_sync() raises the usage count
                         * even on failure, so drop it here to keep the
                         * reference count balanced.
                         */
                        pm_runtime_put_autosuspend(dev);
                        kfree(vma);
                        goto out;
                }

                ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
                if (ret)
                        kfree(vma);

                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
        } else {
                vma->refcount++;
        }

out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

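/*
 * Deferred VMA teardown, run from fence completion once the GPU can no
 * longer be accessing the mapping.
 */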
static void
nouveau_gem_object_delete(void *data)
{
        struct nvkm_vma *vma = data;
        nvkm_vm_unmap(vma);
        nvkm_vm_put(vma);
        kfree(vma);
}

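/*
 * Drop a client's mapping of a buffer.  If the buffer is still fenced
 * and currently mapped, the actual unmap is queued behind the fence via
 * nouveau_gem_object_delete(); otherwise it is torn down immediately.
 */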
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
        const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
        struct reservation_object *resv = nvbo->bo.resv;
        struct reservation_object_list *fobj;
        struct fence *fence = NULL;

        fobj = reservation_object_get_list(resv);

        list_del(&vma->head);

        if (fobj && fobj->shared_count > 1)
                ttm_bo_wait(&nvbo->bo, false, false);
        else if (fobj && fobj->shared_count == 1)
                fence = rcu_dereference_protected(fobj->shared[0],
                                                reservation_object_held(resv));
        else
                fence = reservation_object_get_excl(nvbo->bo.resv);

        if (fence && mapped) {
                nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
        } else {
                if (mapped)
                        nvkm_vm_unmap(vma);
                nvkm_vm_put(vma);
                kfree(vma);
        }
}

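/*
 * Called when a client closes its handle: drop the client's VMA
 * reference, and unmap once the last reference is gone.
 */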
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nvkm_vma *vma;
        int ret;

        if (!cli->vm)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return;

        vma = nouveau_bo_vma_find(nvbo, cli->vm);
        if (vma) {
                if (--vma->refcount == 0) {
                        ret = pm_runtime_get_sync(dev);
                        if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                                nouveau_gem_object_unmap(nvbo, vma);
                                pm_runtime_mark_last_busy(dev);
                                pm_runtime_put_autosuspend(dev);
                        }
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

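/*
 * Allocate a new buffer object and initialise its embedded GEM object.
 * The requested NOUVEAU_GEM_DOMAIN_* flags are translated into TTM
 * placement flags for the allocation.
 */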
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
                flags |= TTM_PL_FLAG_UNCACHED;

        ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
                             tile_flags, NULL, NULL, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;

        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
        if (ret) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
        return 0;
}

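/*
 * Fill in the userspace view of a buffer: domain, size, offset (the
 * per-client virtual address when a VM is in use) and tiling state.
 */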
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nvkm_vma *vma;

        if (is_power_of_2(nvbo->valid_domains))
                rep->domain = nvbo->valid_domains;
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->bo.offset;
        if (cli->vm) {
                vma = nouveau_bo_vma_find(nvbo, cli->vm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->offset;
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

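/* GEM_NEW ioctl: allocate a buffer and return a handle to it. */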
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nvkm_fb *fb = nvxx_fb(&drm->device);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
                NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&nvbo->gem);
        return ret;
}

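/*
 * Work out TTM placement for a pushbuf buffer.  The buffer stays where
 * it is if its current placement is among the requested domains;
 * otherwise VRAM is preferred over GART.
 */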
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

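/*
 * State for reserving the pushbuf's buffer list: the list of reserved
 * BOs and the ww-mutex acquire context used to lock them deadlock-free.
 */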
struct validate_op {
        struct list_head list;
        struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
                        struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_bo *nvbo;
        struct drm_nouveau_gem_pushbuf_bo *b;

        while (!list_empty(&op->list)) {
                nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
                b = &pbbo[nvbo->pbbo_index];

                if (likely(fence))
                        nouveau_bo_fence(nvbo, fence, !!b->write_domains);

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
                drm_gem_object_unreference_unlocked(&nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
              struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        validate_fini_no_ticket(op, fence, pbbo);
        ww_acquire_fini(&op->ticket);
}

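/*
 * Reserve every buffer on the pushbuf's list under a single ww-mutex
 * ticket.  On -EDEADLK all reservations are backed off and the list is
 * retried, re-locking the contended buffer via the slowpath first.
 * Buffers are bucketed by allowed domain so the final list is ordered
 * VRAM-only, GART-only, then either.
 */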
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int trycnt = 0;
        int ret, i;
        struct nouveau_bo *res_bo = NULL;
        LIST_HEAD(gart_list);
        LIST_HEAD(vram_list);
        LIST_HEAD(both_list);

        ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
        if (++trycnt > 100000) {
                NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(file_priv, b->handle);
                if (!gem) {
                        NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
                        ret = -ENOENT;
                        break;
                }
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_unreference_unlocked(gem);
                        continue;
                }

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_unreference_unlocked(gem);
                        ret = -EINVAL;
                        break;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
                if (ret) {
                        list_splice_tail_init(&vram_list, &op->list);
                        list_splice_tail_init(&gart_list, &op->list);
                        list_splice_tail_init(&both_list, &op->list);
                        validate_fini_no_ticket(op, NULL, NULL);
                        if (unlikely(ret == -EDEADLK)) {
                                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                              &op->ticket);
                                if (!ret)
                                        res_bo = nvbo;
                        }
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_PRINTK(err, cli, "fail reserve\n");
                                break;
                        }
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &both_list);
                else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &vram_list);
                else if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &gart_list);
                else {
                        NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &both_list);
                        ret = -EINVAL;
                        break;
                }
                if (nvbo == res_bo)
                        goto retry;
        }

        ww_acquire_done(&op->ticket);
        list_splice_tail(&vram_list, &op->list);
        list_splice_tail(&gart_list, &op->list);
        list_splice_tail(&both_list, &op->list);
        if (ret)
                validate_fini(op, NULL, NULL);
        return ret;
}

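/*
 * Move every reserved buffer into an allowed placement, synchronise
 * against fences on other channels, and (on pre-Tesla chips) write back
 * any presumed offsets that turned out to be wrong so that relocations
 * can be applied.
 */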
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
              uint64_t user_pbbo_ptr)
{
        struct nouveau_drm *drm = chan->drm;
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_PRINTK(err, cli, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail post-validate sync\n");
                        return ret;
                }

                if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                        if (nvbo->bo.offset == b->presumed.offset &&
                            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->bo.offset;
                        b->presumed.valid = 0;
                        relocs++;

                        if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
                                             &b->presumed, sizeof(b->presumed)))
                                return -EFAULT;
                }
        }

        return relocs;
}

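/* Reserve and validate the pushbuf's buffer list in one step. */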
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int ret;

        INIT_LIST_HEAD(&op->list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validating bo list\n");
                validate_fini(op, NULL, NULL);
                return ret;
        }
        *apply_relocs = ret;
        return 0;
}

static inline void
u_free(void *addr)
{
        kvfree(addr);
}

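/*
 * Copy a userspace array into a kernel buffer, falling back from
 * kmalloc to vmalloc for large allocations (the open-coded equivalent
 * of kvmalloc).  Callers bound nmemb via the NOUVEAU_GEM_MAX_* limits,
 * which keeps the size multiplication from overflowing.
 */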
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        size *= nmemb;

        mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!mem)
                mem = vmalloc(size);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(mem, userptr, size)) {
                u_free(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

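/*
 * Apply userspace relocations: patch each buffer with the real offset
 * (low/high half, optionally OR'd with a domain-dependent value) once
 * the buffer is idle.  Only entries whose presumed offset was
 * invalidated during validation need patching.
 */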
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                /* valid indices are 0..nr_buffers-1, so reject == too */
                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_PRINTK(err, cli, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                ret = ttm_bo_wait(&nvbo->bo, false, false);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        u_free(reloc);
        return ret;
}

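/*
 * GEM_PUSHBUF ioctl: the command submission path.  Copies in the push,
 * buffer and reloc arrays, validates the buffer list, applies relocs if
 * needed, then submits via whichever mechanism the channel supports.
 */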
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        if (unlikely(!abi16))
                return -ENOMEM;

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->chid == req->channel) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                u_free(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

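        /* Submit: preferred path is the indirect buffer ring; otherwise
         * emit call commands (nv25+) or patch jump commands into the
         * pushbuf on older chips.
         */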
        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else if (drm->device.info.chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_PRINTK(err, cli, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.
                                                          num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, 0x20000000 |
                                      (nvbo->bo.offset + push[i].offset));
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

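        /* Fence the submission; validate_fini() attaches this fence to
         * every buffer on the list before unreserving it.
         */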
        ret = nouveau_fence_new(chan, false, &fence);
        if (ret) {
                NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence, bo);
        nouveau_fence_unref(&fence);

out_prevalid:
        u_free(bo);
        u_free(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else if (drm->device.info.chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}

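/*
 * GEM_CPU_PREP ioctl: wait (up to 30s, or just poll with NOWAIT) for
 * GPU access to finish, then make the buffer coherent for CPU access.
 */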
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        if (no_wait) {
                ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
        } else {
                long lret;

                lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
                if (!lret)
                        ret = -EBUSY;
                else if (lret > 0)
                        ret = 0;
                else
                        ret = lret;
        }
        nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_unreference_unlocked(gem);

        return ret;
}

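/* GEM_CPU_FINI ioctl: hand the buffer back to the device. */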
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_fini *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_device(nvbo);
        drm_gem_object_unreference_unlocked(gem);
        return 0;
}

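/* GEM_INFO ioctl: look up a handle and report the buffer's parameters. */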
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}