drivers/gpu/drm/etnaviv/etnaviv_gem.c
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

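/* Release the scatter/gather table and drop the shmem pages backing the object. */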
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

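/*
 * Get the backing pages for this object, building the scatter/gather table
 * and performing the initial cache maintenance on first use.
 * Must be called with etnaviv_obj->lock held.
 */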
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

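/*
 * Set up a userspace mapping: pick the page protection matching the object's
 * caching mode and, for cached objects, redirect the vma to the shmem file so
 * the mapping has its own address_space.
 */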
static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return etnaviv_gem_mmap_obj(&obj->base, vma);
}

int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

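/*
 * Look up an existing mapping of this object in the given MMU context;
 * returns NULL if the object is not currently mapped there.
 * Called with etnaviv_obj->lock held.
 */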
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}

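/*
 * Find or create a GPU VA mapping of this object in the given GPU's MMU
 * context.  On success a reference is taken on both the mapping and the GEM
 * object; drop it with etnaviv_gem_mapping_unreference().
 */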
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}

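/* Return a kernel virtual address for the object, creating the vmap lazily. */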
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

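/* Translate ETNA_PREP_* flags into the DMA direction used for cache maintenance. */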
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

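/*
 * Prepare the object for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * just test) outstanding GPU work, and for cached objects sync the pages
 * to the CPU for the requested access direction.
 */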
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

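/*
 * Finish CPU access: for cached objects, flush the CPU caches back to memory
 * so the GPU sees any data written since etnaviv_gem_cpu_prep().
 */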
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

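/*
 * Common object initialisation: validate the caching flags and allocate and
 * initialise the etnaviv_gem_object, but leave GEM object initialisation and
 * backing storage allocation to the caller.
 */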
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

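/*
 * Pin the userspace pages backing a userptr object, faulting them in as
 * needed.  Called either directly from the owning process or from the
 * worker below via get_user_pages_remote().
 */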
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    !etnaviv_obj->userptr.ro, 0,
					    pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

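/*
 * ->get_pages() for userptr objects.  Tries the lockless fast path when
 * called from the owning process; otherwise the pinning is deferred to a
 * workqueue and -EAGAIN is returned so the caller can retry later.
 */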
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}