/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"

#include <linux/completion.h>
#include <linux/kds.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
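
/*
 * When CONFIG_DRM_EXYNOS_DEBUG is enabled, these helpers keep a running
 * count of GEM objects and of the memory they pin so the totals can be
 * inspected while debugging; otherwise they compile to empty stubs.
 */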
#ifdef CONFIG_DRM_EXYNOS_DEBUG
static void exynos_gem_info_add_obj(struct drm_gem_object *obj)
{
        struct exynos_drm_private *dev_priv = obj->dev->dev_private;

        atomic_inc(&dev_priv->mm.object_count);
        atomic_add(obj->size, &dev_priv->mm.object_memory);
}

static void exynos_gem_info_remove_obj(struct drm_gem_object *obj)
{
        struct exynos_drm_private *dev_priv = obj->dev->dev_private;

        atomic_dec(&dev_priv->mm.object_count);
        atomic_sub(obj->size, &dev_priv->mm.object_memory);
}
#else
static void exynos_gem_info_add_obj(struct drm_gem_object *obj)
{
}

static void exynos_gem_info_remove_obj(struct drm_gem_object *obj)
{
}
#endif
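
/*
 * Translate an errno value returned by the mapping path into the
 * VM_FAULT_* code that the page-fault handler reports to the core MM.
 */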
static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;
        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}
static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}
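
/*
 * Round the requested size up to the allocator's granularity: page
 * granularity for non-contiguous buffers, and larger chunks (64KiB or a
 * section) for contiguous buffers to reduce fragmentation.
 */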
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        if (!IS_NONCONTIG_BUFFER(flags)) {
                if (size >= SZ_1M)
                        return roundup(size, SECTION_SIZE);
                else if (size >= SZ_64K)
                        return roundup(size, SZ_64K);
        }

        return roundup(size, PAGE_SIZE);
}
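
/*
 * Allocate one page per PAGE_SIZE chunk of the object and return the page
 * array; the caller owns the array and releases it through
 * exynos_gem_put_pages().
 */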
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
                                        gfp_t gfpmask)
{
        struct page *p, **pages;
        int i, npages;

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                p = alloc_page(gfpmask);
                /* alloc_page() returns NULL on failure, not an ERR_PTR */
                if (!p)
                        goto fail;
                pages[i] = p;
        }

        exynos_gem_info_add_obj(obj);

        return pages;

fail:
        while (--i >= 0)
                __free_page(pages[i]);

        drm_free_large(pages);
        return ERR_PTR(-ENOMEM);
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
                                        struct page **pages)
{
        int npages, i;

        exynos_gem_info_remove_obj(obj);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++)
                __free_page(pages[i]);

        drm_free_large(pages);
}
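
/*
 * Insert a single page of the buffer into the faulting VMA: for
 * non-contiguous buffers the page comes from the page array, for
 * contiguous buffers the pfn is derived from the DMA address.
 */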
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        unsigned long pfn;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
                pfn = page_to_pfn(buf->pages[page_offset++]);
        else
                pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        struct page **pages;
        unsigned int npages, i = 0;
        int ret;

        if (buf->pages) {
                DRM_DEBUG_KMS("already allocated.\n");
                return -EINVAL;
        }

        pages = exynos_gem_get_pages(obj, GFP_KERNEL);
        if (IS_ERR(pages)) {
                DRM_ERROR("failed to get pages.\n");
                return PTR_ERR(pages);
        }

        npages = obj->size >> PAGE_SHIFT;
        buf->page_size = PAGE_SIZE;

        buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!buf->sgt) {
                DRM_ERROR("failed to allocate sg table.\n");
                ret = -ENOMEM;
                goto err_put_pages;
        }

        ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
        if (ret < 0) {
                DRM_ERROR("failed to initialize sg table.\n");
                goto err_free_sgt;
        }

        sgl = buf->sgt->sgl;

        /* set all pages to sg list. */
        while (i < npages) {
                sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
                sg_dma_address(sgl) = page_to_phys(pages[i]);
                i++;
                sgl = sg_next(sgl);
        }

        /* add some codes for UNCACHED type here. TODO */

        buf->pages = pages;

        return 0;

err_free_sgt:
        kfree(buf->sgt);
        buf->sgt = NULL;
err_put_pages:
        exynos_gem_put_pages(obj, pages);
        return ret;
}
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

        /*
         * if buffer type is EXYNOS_BO_NONCONTIG then release all pages
         * allocated at gem fault handler.
         */
        sg_free_table(buf->sgt);
        kfree(buf->sgt);
        buf->sgt = NULL;

        exynos_gem_put_pages(obj, buf->pages);
        buf->pages = NULL;

        /* add some codes for UNCACHED type here. TODO */
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table where the obj is registered
         * and return it through the handle so that userspace can see it.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}
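
/*
 * Tear down an exynos GEM object: drop any KDS resource set still held,
 * release the backing pages or the contiguous buffer, free the mmap offset
 * and the GEM core state, then free the object itself.
 */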
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!exynos_gem_obj)
                return;

        obj = &exynos_gem_obj->base;

        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

        if (exynos_gem_obj->resource_set != NULL) {
                /* kds_resource_set_release NULLs the pointer */
                kds_resource_set_release(&exynos_gem_obj->resource_set);
        }

        if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
                        exynos_gem_obj->buffer->pages)
                exynos_drm_gem_put_pages(obj);
        else
                exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
                                        exynos_gem_obj->buffer);

        exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
        exynos_gem_obj->buffer = NULL;

        if (obj->map_list.map)
                drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
}
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object\n");
                return NULL;
        }

        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);
        DRM_DEBUG_KMS("%s\n", __FILE__);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        /*
         * allocate all pages as desired size if user wants to allocate
         * physically non-contiguous memory.
         */
        if (flags & EXYNOS_BO_NONCONTIG) {
                ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        } else {
                ret = exynos_drm_alloc_buf(dev, buf, flags);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        }

        return exynos_gem_obj;

err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}
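
/*
 * mmap handler installed on the GEM object's shmem file by the MMAP ioctl
 * below, so that userspace gets a direct, uncached mapping of the buffer.
 */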
static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buffer;
        unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        vma->vm_flags |= (VM_IO | VM_RESERVED);

        /* in case of direct mapping, always having non-cacheable attribute */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vm_size = usize = vma->vm_end - vma->vm_start;

        /*
         * a buffer contains information about physically contiguous memory
         * allocated by user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                unsigned int i = 0;

                vma->vm_flags |= VM_MIXEDMAP;

                /* insert the backing pages one at a time. */
                do {
                        ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
                        if (ret) {
                                DRM_ERROR("failed to remap user space.\n");
                                return ret;
                        }

                        uaddr += PAGE_SIZE;
                        usize -= PAGE_SIZE;
                } while (usize > 0);
        } else {
                /*
                 * get page frame number to physical memory to be mapped
                 * to user space.
                 */
                pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
                                                                PAGE_SHIFT;

                DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

                if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
                                        vma->vm_page_prot)) {
                        DRM_ERROR("failed to remap pfn range.\n");
                        return -EAGAIN;
                }
        }

        return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        obj->filp->f_op = &exynos_drm_gem_fops;
        obj->filp->private_data = obj;

        addr = vm_mmap(obj->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference_unlocked(obj);

        if (IS_ERR((void *)addr))
                return PTR_ERR((void *)addr);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}
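
/*
 * KDS callback: wakes up the acquire ioctl once the resource set has been
 * granted to the CPU.
 */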
static void cpu_acquire_kds_cb_fn(void *param1, void *param2)
{
        struct completion *completion = (struct completion *)param1;

        complete(completion);
}
int exynos_drm_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct drm_exynos_gem_cpu_acquire *args = data;
        struct exynos_drm_file_private *file_priv = file->driver_priv;
        struct drm_gem_object *obj;
        struct exynos_drm_gem_obj *exynos_gem_obj;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        struct kds_resource *kds;
        struct kds_resource_set *rset;
        unsigned long exclusive;
        struct kds_callback callback;
        DECLARE_COMPLETION_ONSTACK(completion);
#endif
        struct exynos_drm_gem_obj_node *gem_node;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                ret = -ENODEV;
                goto unlock;
        }

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        if (exynos_gem_obj->base.export_dma_buf == NULL) {
                /* If there is no dmabuf present, there is no cross-process/
                 * cross-device sharing and sync is unnecessary.
                 */
                goto unref_obj;
        }

        exclusive = 0;
        if ((args->flags & DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE) != 0)
                exclusive = 1;
        kds = &exynos_gem_obj->base.export_dma_buf->kds;
        kds_callback_init(&callback, 1, &cpu_acquire_kds_cb_fn);
        ret = kds_async_waitall(&rset, KDS_FLAG_LOCKED_WAIT, &callback,
                &completion, NULL, 1, &exclusive, &kds);
        mutex_unlock(&dev->struct_mutex);
        if (!IS_ERR_VALUE(ret))
                ret = wait_for_completion_interruptible(&completion);
        kds_callback_term(&callback);
        mutex_lock(&dev->struct_mutex);
        if (IS_ERR_VALUE(ret))
                goto release_rset;
#endif

        gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
        if (gem_node == NULL) {
                DRM_ERROR("failed to allocate exynos_drm_gem_obj_node.\n");
                ret = -ENOMEM;
                goto release_rset;
        }

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        exynos_gem_obj->resource_set = rset;
#endif

        gem_node->exynos_gem_obj = exynos_gem_obj;
        list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
        mutex_unlock(&dev->struct_mutex);
        return 0;

release_rset:
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        kds_resource_set_release_sync(&rset);
#endif

unref_obj:
        drm_gem_object_unreference(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int exynos_drm_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct drm_exynos_gem_cpu_release *args = data;
        struct exynos_drm_file_private *file_priv = file->driver_priv;
        struct drm_gem_object *obj;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct list_head *cur;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                ret = -ENODEV;
                goto unlock;
        }

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        if (exynos_gem_obj->base.export_dma_buf == NULL) {
                /* If there is no dmabuf present, there is no cross-process/
                 * cross-device sharing and sync is unnecessary.
                 */
                goto unref_obj;
        }
#endif

        list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
                struct exynos_drm_gem_obj_node *node = list_entry(
                                cur, struct exynos_drm_gem_obj_node, list);
                if (node->exynos_gem_obj == exynos_gem_obj)
                        break;
        }
        if (cur == &file_priv->gem_cpu_acquire_list) {
                DRM_ERROR("gem object not acquired for current process.\n");
                ret = -EINVAL;
                goto unref_obj;
        }

#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
        /* kds_resource_set_release NULLs the pointer */
        BUG_ON(exynos_gem_obj->resource_set == NULL);
        kds_resource_set_release(&exynos_gem_obj->resource_set);
#endif

        list_del(cur);
        kfree(list_entry(cur, struct exynos_drm_gem_obj_node, list));
        /* unreference for the reference held since cpu_acquire_ioctl */
        drm_gem_object_unreference(obj);

unref_obj:
        /* unreference for the reference from drm_gem_object_lookup() */
        drm_gem_object_unreference(obj);

unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
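
/*
 * Final release path for a GEM object: if the object was imported through
 * dma-buf, detach that first, then destroy the exynos object.
 */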
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(args->flags & EXYNOS_BO_NONCONTIG)) {
                DRM_ERROR("contig buffer allocation not supported.\n");
                /*
                 * HACK: Currently we do not support CONTIG buffer
                 * allocation from user space. The drm framework
                 * supports non-contig buffers only. In the next versions
                 * the option to choose contig/non-contig buffers itself
                 * will not be supported through this flag. For now, we just
                 * reject contig requests.
                 */
                return -EINVAL;
        }

        /*
         * allocate memory to be used for framebuffer.
         * - this callback would be called by user application
         *   with DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ALIGN(args->bpp, 8) >> 3;
        args->pitch = round_up(args->pitch, 64);
        args->size = PAGE_ALIGN(args->pitch * args->height);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback would be called by user application
         *   with DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        if (!exynos_gem_obj->base.map_list.map) {
                ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
                if (ret)
                        goto out;
        }

        *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
                                struct drm_device *dev,
                                unsigned int handle)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * obj->refcount and obj->handle_count are decreased and
         * if both of them reach 0 then exynos_drm_gem_free_object()
         * would be called by callback to release resources.
         */
        ret = drm_gem_handle_delete(file_priv, handle);
        if (ret < 0) {
                DRM_ERROR("failed to delete drm_gem_handle.\n");
                return ret;
        }

        return 0;
}
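
/*
 * Page-fault handler for GEM mmaps: maps the faulting page into the VMA
 * and converts the result into a VM_FAULT_* code.
 */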
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map pages.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        return ret;
}