/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
	}

	return roundup(size, PAGE_SIZE);
}
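/*
 * Rounding examples for the helper above (illustrative; assumes
 * SECTION_SIZE is 1MiB, as is typical on ARM, and 4KiB pages):
 * a 1.5MiB contiguous request rounds up to 2MiB, a 100KiB contiguous
 * request rounds up to 128KiB, and a non-contiguous request of any
 * size only rounds up to the next PAGE_SIZE boundary.
 */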
static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(PTR_ERR(p));
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages,
					bool dirty, bool accessed)
{
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		unsigned long usize = buf->size;

		if (!buf->pages)
			return -EINTR;

		while (usize > 0) {
			pfn = page_to_pfn(buf->pages[page_offset++]);
			vm_insert_mixed(vma, f_vaddr, pfn);
			f_vaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		}

		return 0;
	}

	pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}
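/*
 * pfn example for the contiguous branch above (illustrative, assuming
 * 4KiB pages): with buf->dma_addr = 0x40000000 and page_offset = 3,
 * pfn = (0x40000000 >> 12) + 3 = 0x40003, i.e. the fourth page of the
 * buffer gets inserted at the faulting address.
 */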
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		ret = -EFAULT;
		goto err_free_sgt;
	}

	sgl = buf->sgt->sgl;

	/* set all pages into the sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* TODO: handle the UNCACHED type here. */

	buf->pages = pages;

	return ret;

err_free_sgt:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages, true, false);

	return ret;
}
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG, release all of the
	 * pages allocated in the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages, true, false);
	buf->pages = NULL;

	/* TODO: handle the UNCACHED type here. */
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered,
	 * and the handle holds the id that user space can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!exynos_gem_obj)
		return;

	obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
			exynos_gem_obj->buffer->pages)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
					exynos_gem_obj->buffer);

	exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}
static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages up front if the user wants physically
	 * non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	} else {
		ret = exynos_drm_alloc_buf(dev, buf, flags);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
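/*
 * Minimal user-space sketch of this ioctl (illustrative only; assumes
 * fd is a DRM file descriptor opened on the exynos device and that the
 * exynos_drm.h uapi header is available):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		req.handle now names the buffer within this DRM file.
 */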
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
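/*
 * In-kernel usage sketch for the get/put pair above (illustrative; the
 * caller and the way *addr is consumed are hypothetical). The lookup in
 * the getter takes an extra object reference, which the putter drops:
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(dev, handle, file_priv);
 *	if (!IS_ERR(addr))
 *		program *addr into the hardware, then release it with
 *		exynos_drm_gem_put_dma_addr(dev, handle, file_priv);
 */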
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	/* in case of direct mapping, always use the non-cacheable attribute */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * a buffer contains information on physically contiguous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
	up_write(&current->mm->mmap_sem);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}
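/*
 * Minimal user-space sketch of this ioctl (illustrative only; req is a
 * previously created GEM buffer as in the create-ioctl example above):
 *
 *	struct drm_exynos_gem_mmap mmap_req = {
 *		.handle = req.handle,
 *		.size = req.size,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &mmap_req) == 0)
 *		the buffer is now mapped at
 *		(void *)(unsigned long)mmap_req.mapped.
 */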
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * args->bpp >> 3;
	args->size = PAGE_ALIGN(args->pitch * args->height);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
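/*
 * Worked example for the pitch/size computation above (assuming 4KiB
 * pages): a 1024x768, 32bpp dumb buffer gets pitch = 1024 * 32 / 8 =
 * 4096 bytes and size = PAGE_ALIGN(4096 * 768) = 3145728 bytes, i.e.
 * 768 pages.
 */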
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (!exynos_gem_obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
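/*
 * Typical dumb-buffer flow from user space (illustrative), exercising
 * the two callbacks above through the generic mode ioctls:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */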
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * would be called by callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	/*
	 * allocate all pages on first fault if the object was created
	 * as physically non-contiguous memory.
	 */
	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(obj);
		if (ret < 0)
			goto err;
	}

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

err:
	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}