/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <linux/completion.h>
#include <linux/kds.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

#ifdef CONFIG_DRM_EXYNOS_DEBUG
static void exynos_gem_info_add_obj(struct drm_gem_object *obj)
{
        struct exynos_drm_private *dev_priv = obj->dev->dev_private;

        atomic_inc(&dev_priv->mm.object_count);
        atomic_add(obj->size, &dev_priv->mm.object_memory);
}

static void exynos_gem_info_remove_obj(struct drm_gem_object *obj)
{
        struct exynos_drm_private *dev_priv = obj->dev->dev_private;

        atomic_dec(&dev_priv->mm.object_count);
        atomic_sub(obj->size, &dev_priv->mm.object_memory);
}
#else
static void exynos_gem_info_add_obj(struct drm_gem_object *obj)
{
}

static void exynos_gem_info_remove_obj(struct drm_gem_object *obj)
{
}
#endif

static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        if (!IS_NONCONTIG_BUFFER(flags)) {
                if (size >= SZ_1M)
                        return roundup(size, SECTION_SIZE);
                else if (size >= SZ_64K)
                        return roundup(size, SZ_64K);
                else
                        goto out;
        }
out:
        return roundup(size, PAGE_SIZE);
}
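
/*
 * Worked example of the rounding above, assuming 4 KiB pages and a 1 MiB
 * SECTION_SIZE (typical for ARM; both values are illustrative):
 *
 *   contiguous,     size = 3000000 bytes -> 3 MiB   (SECTION_SIZE multiple)
 *   contiguous,     size = 100 KiB       -> 128 KiB (SZ_64K multiple)
 *   non-contiguous, size = 10 KiB        -> 12 KiB  (PAGE_SIZE multiple)
 */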

struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
                                                gfp_t gfpmask)
{
        struct page *p, **pages;
        int i, npages;

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                p = alloc_page(gfpmask);
                if (!p)
                        goto fail;
                pages[i] = p;
        }

        exynos_gem_info_add_obj(obj);

        return pages;

fail:
        while (i--)
                __free_page(pages[i]);

        drm_free_large(pages);
        /*
         * alloc_page() returns NULL on failure, so PTR_ERR(p) would be 0;
         * report -ENOMEM explicitly.
         */
        return ERR_PTR(-ENOMEM);
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
                                        struct page **pages)
{
        int i, npages;

        exynos_gem_info_remove_obj(obj);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++)
                __free_page(pages[i]);

        drm_free_large(pages);
}

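/*
 * Map a single page of the buffer into the user mapping at @f_vaddr.
 * For non-contiguous objects the page comes from the buffer's page
 * array; for contiguous objects the pfn is computed directly from the
 * buffer's DMA address.
 */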
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        unsigned long pfn;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                if (!buf->pages)
                        return -EINTR;

                pfn = page_to_pfn(buf->pages[page_offset++]);
        } else
                pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        struct page **pages;
        unsigned int npages, i = 0;
        int ret;

        if (buf->pages) {
                DRM_DEBUG_KMS("already allocated.\n");
                return -EINVAL;
        }

        pages = exynos_gem_get_pages(obj, GFP_KERNEL);
        if (IS_ERR(pages)) {
                DRM_ERROR("failed to get pages.\n");
                return PTR_ERR(pages);
        }

        npages = obj->size >> PAGE_SHIFT;
        buf->page_size = PAGE_SIZE;

        buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!buf->sgt) {
                DRM_ERROR("failed to allocate sg table.\n");
                ret = -ENOMEM;
                goto err;
        }

        ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
        if (ret < 0) {
                DRM_ERROR("failed to initialize sg table.\n");
                ret = -EFAULT;
                goto err1;
        }

        sgl = buf->sgt->sgl;

        /* add all pages to the sg list. */
        while (i < npages) {
                sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
                sg_dma_address(sgl) = page_to_phys(pages[i]);
                i++;
                sgl = sg_next(sgl);
        }

        /* TODO: add handling for the UNCACHED type here. */

        buf->pages = pages;
        return ret;
err1:
        kfree(buf->sgt);
        buf->sgt = NULL;
err:
        exynos_gem_put_pages(obj, pages);
        return ret;
}

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

        /*
         * if the buffer type is EXYNOS_BO_NONCONTIG, release all pages
         * allocated by the gem fault handler.
         */
        sg_free_table(buf->sgt);
        kfree(buf->sgt);
        buf->sgt = NULL;

        exynos_gem_put_pages(obj, buf->pages);
        buf->pages = NULL;

        /* TODO: add handling for the UNCACHED type here. */
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table where the obj is registered;
         * the handle carries the id that user space can see.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!exynos_gem_obj)
                return;

        obj = &exynos_gem_obj->base;

        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

        if (exynos_gem_obj->resource_set != NULL) {
                /* kds_resource_set_release NULLs the pointer */
                kds_resource_set_release(&exynos_gem_obj->resource_set);
        }

        if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
                        exynos_gem_obj->buffer->pages)
                exynos_drm_gem_put_pages(obj);
        else
                exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
                                        exynos_gem_obj->buffer);

        exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
        exynos_gem_obj->buffer = NULL;

        if (obj->map_list.map)
                drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object\n");
                return NULL;
        }

        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        /* print the pointer directly instead of truncating it to 32 bits. */
        DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

        return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        /* validate the flags before they are used to round up the size. */
        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        size = roundup_gem_size(size, flags);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        /*
         * allocate all pages at the desired size if the user wants
         * physically non-contiguous memory.
         */
        if (flags & EXYNOS_BO_NONCONTIG) {
                ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        } else {
                ret = exynos_drm_alloc_buf(dev, buf, flags);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        }

        return exynos_gem_obj;

err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
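
/*
 * Hypothetical user-space sketch of the create ioctl above. The struct
 * fields match how args are used in this file; the ioctl number name is
 * taken from <drm/exynos_drm.h>, everything else is illustrative only:
 *
 *      struct drm_exynos_gem_create req = {
 *              .size = 4096,
 *              .flags = EXYNOS_BO_NONCONTIG,
 *      };
 *
 *      if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *              buffer_handle = req.handle;
 */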

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buffer;
        unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        vma->vm_flags |= (VM_IO | VM_RESERVED);

        /* in case of direct mapping, always use a non-cacheable attribute */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vm_size = usize = vma->vm_end - vma->vm_start;

        /*
         * the buffer holds information about the memory allocated by
         * user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if the user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                int i = 0;

                if (!buffer->pages)
                        return -EINVAL;

                vma->vm_flags |= VM_MIXEDMAP;

                do {
                        ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
                        if (ret) {
                                DRM_ERROR("failed to remap user space.\n");
                                return ret;
                        }

                        uaddr += PAGE_SIZE;
                        usize -= PAGE_SIZE;
                } while (usize > 0);
        } else {
                /*
                 * get the page frame number of the physical memory to be
                 * mapped to user space.
                 */
                pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
                                                                PAGE_SHIFT;

                DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

                if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
                                        vma->vm_page_prot)) {
                        DRM_ERROR("failed to remap pfn range.\n");
                        return -EAGAIN;
                }
        }

        return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        /* vm_mmap() returns an unsigned long; don't truncate it. */
        unsigned long addr;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        obj->filp->f_op = &exynos_drm_gem_fops;
        obj->filp->private_data = obj;

        addr = vm_mmap(obj->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference_unlocked(obj);

        if (IS_ERR_VALUE(addr))
                return (int)addr;

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}
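
/*
 * Hypothetical user-space sketch continuing the create example above,
 * mapping the object through the mmap ioctl (field names as used by this
 * file; the ioctl number name follows this tree's <drm/exynos_drm.h>;
 * error handling elided):
 *
 *      struct drm_exynos_gem_mmap mmap_req = {
 *              .handle = buffer_handle,
 *              .size = 4096,
 *      };
 *
 *      ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &mmap_req);
 *      cpu_ptr = (void *)(unsigned long)mmap_req.mapped;
 */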

static void cpu_acquire_kds_cb_fn(void *param1, void *param2)
{
        struct completion *completion = (struct completion *)param1;

        complete(completion);
}

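/*
 * Acquire a buffer for CPU access. If the object is exported as a dma-buf,
 * wait on its KDS resource (via the callback and completion above) until
 * the buffer becomes available, then record the acquisition on the file's
 * gem_cpu_acquire_list so it can be dropped again on release or file close.
 */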
int exynos_drm_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct drm_exynos_gem_cpu_acquire *args = data;
        struct exynos_drm_file_private *file_priv = file->driver_priv;
        struct drm_gem_object *obj;
        struct exynos_drm_gem_obj *exynos_gem_obj;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        struct kds_resource *kds;
        struct kds_resource_set *rset;
        unsigned long exclusive;
        struct kds_callback callback;
        DECLARE_COMPLETION_ONSTACK(completion);
#endif
        struct exynos_drm_gem_obj_node *gem_node;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                ret = -ENODEV;
                goto unlock;
        }

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        if (exynos_gem_obj->base.export_dma_buf == NULL) {
                /* If there is no dmabuf present, there is no cross-process/
                 * cross-device sharing and sync is unnecessary.
                 */
                ret = 0;
                goto unref_obj;
        }

        exclusive = 0;
        if ((args->flags & DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE) != 0)
                exclusive = 1;
        kds = &exynos_gem_obj->base.export_dma_buf->kds;
        kds_callback_init(&callback, 1, &cpu_acquire_kds_cb_fn);
        ret = kds_async_waitall(&rset, KDS_FLAG_LOCKED_WAIT, &callback,
                &completion, NULL, 1, &exclusive, &kds);
        mutex_unlock(&dev->struct_mutex);

        if (!IS_ERR_VALUE(ret))
                ret = wait_for_completion_interruptible(&completion);
        kds_callback_term(&callback);

        mutex_lock(&dev->struct_mutex);
        if (IS_ERR_VALUE(ret))
                goto release_rset;
#endif

        gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
        if (!gem_node) {
                DRM_ERROR("failed to allocate exynos_drm_gem_obj_node.\n");
                ret = -ENOMEM;
                goto release_rset;
        }

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        exynos_gem_obj->resource_set = rset;
#endif

        gem_node->exynos_gem_obj = exynos_gem_obj;
        list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
        mutex_unlock(&dev->struct_mutex);
        return 0;

release_rset:
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        kds_resource_set_release_sync(&rset);
#endif

unref_obj:
        drm_gem_object_unreference(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

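/*
 * Counterpart to the acquire ioctl above: find the matching entry on the
 * file's gem_cpu_acquire_list, release the KDS resource set taken at
 * acquire time, and drop the extra object reference the acquire kept.
 */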
int exynos_drm_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct drm_exynos_gem_cpu_release *args = data;
        struct exynos_drm_file_private *file_priv = file->driver_priv;
        struct drm_gem_object *obj;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct list_head *cur;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                ret = -ENODEV;
                goto unlock;
        }

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        if (exynos_gem_obj->base.export_dma_buf == NULL) {
                /* If there is no dmabuf present, there is no cross-process/
                 * cross-device sharing and sync is unnecessary.
                 */
                ret = 0;
                goto unref_obj;
        }
#endif

        list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
                struct exynos_drm_gem_obj_node *node = list_entry(
                                cur, struct exynos_drm_gem_obj_node, list);
                if (node->exynos_gem_obj == exynos_gem_obj)
                        break;
        }
        if (cur == &file_priv->gem_cpu_acquire_list) {
                DRM_ERROR("gem object not acquired for current process.\n");
                ret = -EINVAL;
                goto unref_obj;
        }

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        /* kds_resource_set_release NULLs the pointer */
        BUG_ON(exynos_gem_obj->resource_set == NULL);
        kds_resource_set_release(&exynos_gem_obj->resource_set);
#endif

        list_del(cur);
        kfree(list_entry(cur, struct exynos_drm_gem_obj_node, list));
        /* unreference for the reference held since cpu_acquire_ioctl */
        drm_gem_object_unreference(obj);
        ret = 0;

unref_obj:
        /* unreference for the reference from drm_gem_object_lookup() */
        drm_gem_object_unreference(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(args->flags & EXYNOS_BO_NONCONTIG)) {
                DRM_ERROR("contig buffer allocation not supported.\n");
                /*
                 * HACK: CONTIG buffer allocation from user space is not
                 * currently supported; the drm framework supports
                 * non-contig buffers only. In future versions the
                 * contig/non-contig choice will not be exposed through
                 * this flag at all. For now, just return an error.
                 */
                return -EINVAL;
        }

        /*
         * allocate memory to be used for the framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ALIGN(args->bpp, 8) >> 3;
        args->pitch = round_up(args->pitch, 64);

        args->size = PAGE_ALIGN(args->pitch * args->height);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
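
/*
 * Worked example of the pitch/size math above for a hypothetical
 * 1366x768, 32 bpp dumb buffer (4 KiB pages assumed):
 *
 *   pitch = 1366 * ALIGN(32, 8) / 8 = 5464, round_up(5464, 64) = 5504
 *   size  = PAGE_ALIGN(5504 * 768)  = 4227072 bytes (1032 pages)
 */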

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        if (!exynos_gem_obj->base.map_list.map) {
                ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
                if (ret)
                        goto out;
        }

        *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
                                struct drm_device *dev,
                                unsigned int handle)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * obj->refcount and obj->handle_count are decreased and
         * if both of them reach 0 then exynos_drm_gem_free_object()
         * would be called by a callback to release the resources.
         */
        ret = drm_gem_handle_delete(file_priv, handle);
        if (ret < 0) {
                DRM_ERROR("failed to delete drm_gem_handle.\n");
                return ret;
        }

        return 0;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map pages.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        return ret;
}