3 * drivers/staging/android/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
42 #include "compat_ion.h"
45 * struct ion_device - the metadata of the ion device node
46 * @dev: the actual misc device
47 * @buffers: an rb tree of all the existing buffers
48 * @buffer_lock: lock protecting the tree of buffers
49 * @lock: rwsem protecting the tree of heaps and clients
50 * @heaps: list of all the heaps in the system
51 * @clients: an rb tree of all the clients attached to the device
54 struct miscdevice dev;
55 struct rb_root buffers;
56 struct mutex buffer_lock;
57 struct rw_semaphore lock;
58 struct plist_head heaps;
59 long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
61 struct rb_root clients;
62 struct dentry *debug_root;
63 struct dentry *heaps_debug_root;
64 struct dentry *clients_debug_root;
68 * struct ion_client - a process/hw block local address space
69 * @node: node in the tree of all clients
70 * @dev: backpointer to ion device
71 * @handles: an rb tree of all the handles in this client
72 * @idr: an idr space for allocating handle ids
73 * @lock: lock protecting the tree of handles
74 * @name: used for debugging
75 * @task: used for debugging
77 * A client represents a list of buffers this client may access.
78 * The mutex stored here is used to protect both the tree of handles
79 * and the handles themselves, and should be held while modifying either.
83 struct ion_device *dev;
84 struct rb_root handles;
88 struct task_struct *task;
90 struct dentry *debug_root;
94 * ion_handle - a client local reference to a buffer
95 * @ref: reference count
96 * @client: back pointer to the client the buffer resides in
97 * @buffer: pointer to the buffer
98 * @node: node in the client's handle rbtree
99 * @kmap_cnt: count of times this handle has been mapped to the kernel
100 * @id: client-unique id allocated by client->idr
102 * Modifications to node, kmap_cnt or the kernel mapping should be protected by the
103 * lock in the client. Other fields are never changed after initialization.
107 struct ion_client *client;
108 struct ion_buffer *buffer;
110 unsigned int kmap_cnt;
114 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
116 return (buffer->flags & ION_FLAG_CACHED) &&
117 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
120 bool ion_buffer_cached(struct ion_buffer *buffer)
122 return !!(buffer->flags & ION_FLAG_CACHED);
125 static inline struct page *ion_buffer_page(struct page *page)
127 return (struct page *)((unsigned long)page & ~(1UL));
130 static inline bool ion_buffer_page_is_dirty(struct page *page)
132 return !!((unsigned long)page & 1UL);
135 static inline void ion_buffer_page_dirty(struct page **page)
137 *page = (struct page *)((unsigned long)(*page) | 1UL);
140 static inline void ion_buffer_page_clean(struct page **page)
142 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
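/*
 * For buffers that fault in their user mappings (see
 * ion_buffer_fault_user_mappings() above), buffer->pages[] keeps one
 * struct page pointer per PAGE_SIZE of the buffer.  The helpers above
 * borrow the otherwise-zero low bit of each pointer as a "dirty" tag:
 * ion_vm_fault() sets it when a page is faulted into a userspace mapping,
 * and ion_buffer_sync_for_device() syncs only the tagged pages to the
 * device before clearing the tag again.
 */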
145 /* this function should only be called while dev->lock is held */
146 static void ion_buffer_add(struct ion_device *dev,
147 struct ion_buffer *buffer)
149 struct rb_node **p = &dev->buffers.rb_node;
150 struct rb_node *parent = NULL;
151 struct ion_buffer *entry;
155 entry = rb_entry(parent, struct ion_buffer, node);
157 if (buffer < entry) {
159 } else if (buffer > entry) {
162 pr_err("%s: buffer already found.\n", __func__);
167 rb_link_node(&buffer->node, parent, p);
168 rb_insert_color(&buffer->node, &dev->buffers);
171 /* this function should only be called while dev->lock is held */
172 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
173 struct ion_device *dev,
178 struct ion_buffer *buffer;
179 struct sg_table *table;
180 struct scatterlist *sg;
183 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
185 return ERR_PTR(-ENOMEM);
188 buffer->flags = flags;
189 kref_init(&buffer->ref);
191 ret = heap->ops->allocate(heap, buffer, len, align, flags);
194 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
197 ion_heap_freelist_drain(heap, 0);
198 ret = heap->ops->allocate(heap, buffer, len, align,
207 table = heap->ops->map_dma(heap, buffer);
208 if (WARN_ONCE(table == NULL,
209 "heap->ops->map_dma should return ERR_PTR on error"))
210 table = ERR_PTR(-EINVAL);
212 heap->ops->free(buffer);
214 return ERR_CAST(table);
216 buffer->sg_table = table;
217 if (ion_buffer_fault_user_mappings(buffer)) {
218 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
219 struct scatterlist *sg;
222 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
223 if (!buffer->pages) {
228 for_each_sg(table->sgl, sg, table->nents, i) {
229 struct page *page = sg_page(sg);
231 for (j = 0; j < sg->length / PAGE_SIZE; j++)
232 buffer->pages[k++] = page++;
241 INIT_LIST_HEAD(&buffer->vmas);
242 mutex_init(&buffer->lock);
243 /* this will set up dma addresses for the sglist -- it is not
244 technically correct as per the dma api -- a specific
245 device isn't really taking ownership here. However, in practice on
246 our systems the only dma_address space is physical addresses.
247 Additionally, we can't afford the overhead of invalidating every
248 allocation via dma_map_sg. The implicit contract here is that
249 memory coming from the heaps is ready for dma, i.e. if it has a
250 cached mapping that mapping has been invalidated */
251 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
252 sg_dma_address(sg) = sg_phys(sg);
253 mutex_lock(&dev->buffer_lock);
254 ion_buffer_add(dev, buffer);
255 mutex_unlock(&dev->buffer_lock);
259 heap->ops->unmap_dma(heap, buffer);
260 heap->ops->free(buffer);
263 vfree(buffer->pages);
269 void ion_buffer_destroy(struct ion_buffer *buffer)
271 if (WARN_ON(buffer->kmap_cnt > 0))
272 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
273 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
274 buffer->heap->ops->free(buffer);
276 vfree(buffer->pages);
280 static void _ion_buffer_destroy(struct kref *kref)
282 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
283 struct ion_heap *heap = buffer->heap;
284 struct ion_device *dev = buffer->dev;
286 mutex_lock(&dev->buffer_lock);
287 rb_erase(&buffer->node, &dev->buffers);
288 mutex_unlock(&dev->buffer_lock);
290 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
291 ion_heap_freelist_add(heap, buffer);
293 ion_buffer_destroy(buffer);
296 static void ion_buffer_get(struct ion_buffer *buffer)
298 kref_get(&buffer->ref);
301 static int ion_buffer_put(struct ion_buffer *buffer)
303 return kref_put(&buffer->ref, _ion_buffer_destroy);
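/*
 * Buffer lifetime is reference counted: each handle created against a
 * buffer holds one reference (taken in ion_handle_create()), as does each
 * dma_buf exported from it.  When the last reference is dropped,
 * _ion_buffer_destroy() unlinks the buffer from the device rbtree and
 * either frees it immediately or, for heaps with ION_HEAP_FLAG_DEFER_FREE,
 * queues it on the heap's freelist.
 */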
306 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
308 mutex_lock(&buffer->lock);
309 buffer->handle_count++;
310 mutex_unlock(&buffer->lock);
313 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
316 * when a buffer is removed from a handle, if it is not in
317 * any other handles, copy the taskcomm and the pid of the
318 * process it's being removed from into the buffer. At this
319 * point there will be no way to track what processes this buffer is
320 * being used by, it only exists as a dma_buf file descriptor.
321 * The taskcomm and pid can provide a debug hint as to where this fd is in the system.
324 mutex_lock(&buffer->lock);
325 buffer->handle_count--;
326 BUG_ON(buffer->handle_count < 0);
327 if (!buffer->handle_count) {
328 struct task_struct *task;
330 task = current->group_leader;
331 get_task_comm(buffer->task_comm, task);
332 buffer->pid = task_pid_nr(task);
334 mutex_unlock(&buffer->lock);
337 static struct ion_handle *ion_handle_create(struct ion_client *client,
338 struct ion_buffer *buffer)
340 struct ion_handle *handle;
342 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
344 return ERR_PTR(-ENOMEM);
345 kref_init(&handle->ref);
346 RB_CLEAR_NODE(&handle->node);
347 handle->client = client;
348 ion_buffer_get(buffer);
349 ion_buffer_add_to_handle(buffer);
350 handle->buffer = buffer;
355 static void ion_handle_kmap_put(struct ion_handle *);
357 static void ion_handle_destroy(struct kref *kref)
359 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
360 struct ion_client *client = handle->client;
361 struct ion_buffer *buffer = handle->buffer;
363 mutex_lock(&buffer->lock);
364 while (handle->kmap_cnt)
365 ion_handle_kmap_put(handle);
366 mutex_unlock(&buffer->lock);
368 idr_remove(&client->idr, handle->id);
369 if (!RB_EMPTY_NODE(&handle->node))
370 rb_erase(&handle->node, &client->handles);
372 ion_buffer_remove_from_handle(buffer);
373 ion_buffer_put(buffer);
378 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
380 return handle->buffer;
383 static void ion_handle_get(struct ion_handle *handle)
385 kref_get(&handle->ref);
388 static int ion_handle_put(struct ion_handle *handle)
390 struct ion_client *client = handle->client;
393 mutex_lock(&client->lock);
394 ret = kref_put(&handle->ref, ion_handle_destroy);
395 mutex_unlock(&client->lock);
400 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
401 struct ion_buffer *buffer)
403 struct rb_node *n = client->handles.rb_node;
406 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
407 if (buffer < entry->buffer)
409 else if (buffer > entry->buffer)
414 return ERR_PTR(-EINVAL);
417 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
420 struct ion_handle *handle;
422 mutex_lock(&client->lock);
423 handle = idr_find(&client->idr, id);
425 ion_handle_get(handle);
426 mutex_unlock(&client->lock);
428 return handle ? handle : ERR_PTR(-EINVAL);
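/*
 * On success a reference is taken on the returned handle; the caller is
 * expected to balance it with ion_handle_put().
 */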
431 static bool ion_handle_validate(struct ion_client *client,
432 struct ion_handle *handle)
434 WARN_ON(!mutex_is_locked(&client->lock));
435 return idr_find(&client->idr, handle->id) == handle;
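/*
 * A handle is only valid while client->idr still maps its id back to the
 * same handle.  Callers must hold client->lock (hence the WARN_ON above)
 * so the handle cannot be torn down underneath them.
 */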
438 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
441 struct rb_node **p = &client->handles.rb_node;
442 struct rb_node *parent = NULL;
443 struct ion_handle *entry;
445 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
453 entry = rb_entry(parent, struct ion_handle, node);
455 if (handle->buffer < entry->buffer)
457 else if (handle->buffer > entry->buffer)
460 WARN(1, "%s: buffer already found.\n", __func__);
463 rb_link_node(&handle->node, parent, p);
464 rb_insert_color(&handle->node, &client->handles);
469 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
470 size_t align, unsigned int heap_id_mask,
473 struct ion_handle *handle;
474 struct ion_device *dev = client->dev;
475 struct ion_buffer *buffer = NULL;
476 struct ion_heap *heap;
479 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
480 len, align, heap_id_mask, flags);
482 * traverse the list of heaps available in this system in priority
483 * order. If the heap type is supported by the client, and matches the
484 * request of the caller, allocate from it. Repeat until an allocation has
485 * succeeded or all heaps have been tried
487 len = PAGE_ALIGN(len);
490 return ERR_PTR(-EINVAL);
492 down_read(&dev->lock);
493 plist_for_each_entry(heap, &dev->heaps, node) {
494 /* if the caller didn't specify this heap id */
495 if (!((1 << heap->id) & heap_id_mask))
497 buffer = ion_buffer_create(heap, dev, len, align, flags);
504 return ERR_PTR(-ENODEV);
507 return ERR_CAST(buffer);
509 handle = ion_handle_create(client, buffer);
512 * ion_buffer_create will create a buffer with a ref_cnt of 1,
513 * and ion_handle_create will take a second reference, drop one here
515 ion_buffer_put(buffer);
520 mutex_lock(&client->lock);
521 ret = ion_handle_add(client, handle);
522 mutex_unlock(&client->lock);
524 ion_handle_put(handle);
525 handle = ERR_PTR(ret);
530 EXPORT_SYMBOL(ion_alloc);
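/*
 * Illustrative in-kernel usage (a sketch only -- error handling with
 * IS_ERR() is omitted, and the heap mask and flags are examples that
 * assume the platform registered a system heap at the usual id bit):
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *					      ION_HEAP_SYSTEM_MASK,
 *					      ION_FLAG_CACHED);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */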
532 void ion_free(struct ion_client *client, struct ion_handle *handle)
536 BUG_ON(client != handle->client);
538 mutex_lock(&client->lock);
539 valid_handle = ion_handle_validate(client, handle);
542 WARN(1, "%s: invalid handle passed to free.\n", __func__);
543 mutex_unlock(&client->lock);
546 mutex_unlock(&client->lock);
547 ion_handle_put(handle);
549 EXPORT_SYMBOL(ion_free);
551 int ion_phys(struct ion_client *client, struct ion_handle *handle,
552 ion_phys_addr_t *addr, size_t *len)
554 struct ion_buffer *buffer;
557 mutex_lock(&client->lock);
558 if (!ion_handle_validate(client, handle)) {
559 mutex_unlock(&client->lock);
563 buffer = handle->buffer;
565 if (!buffer->heap->ops->phys) {
566 pr_err("%s: ion_phys is not implemented by this heap.\n",
568 mutex_unlock(&client->lock);
571 mutex_unlock(&client->lock);
572 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
575 EXPORT_SYMBOL(ion_phys);
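/*
 * ion_phys() can only succeed for heaps that back buffers with physically
 * contiguous memory and therefore implement ->phys() (carveout-style
 * heaps, for example); for other heaps use ion_sg_table() instead.
 */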
577 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
581 if (buffer->kmap_cnt) {
583 return buffer->vaddr;
585 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
586 if (WARN_ONCE(vaddr == NULL,
587 "heap->ops->map_kernel should return ERR_PTR on error"))
588 return ERR_PTR(-EINVAL);
591 buffer->vaddr = vaddr;
596 static void *ion_handle_kmap_get(struct ion_handle *handle)
598 struct ion_buffer *buffer = handle->buffer;
601 if (handle->kmap_cnt) {
603 return buffer->vaddr;
605 vaddr = ion_buffer_kmap_get(buffer);
612 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
615 if (!buffer->kmap_cnt) {
616 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
617 buffer->vaddr = NULL;
621 static void ion_handle_kmap_put(struct ion_handle *handle)
623 struct ion_buffer *buffer = handle->buffer;
626 if (!handle->kmap_cnt)
627 ion_buffer_kmap_put(buffer);
630 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
632 struct ion_buffer *buffer;
635 mutex_lock(&client->lock);
636 if (!ion_handle_validate(client, handle)) {
637 pr_err("%s: invalid handle passed to map_kernel.\n",
639 mutex_unlock(&client->lock);
640 return ERR_PTR(-EINVAL);
643 buffer = handle->buffer;
645 if (!handle->buffer->heap->ops->map_kernel) {
646 pr_err("%s: map_kernel is not implemented by this heap.\n",
648 mutex_unlock(&client->lock);
649 return ERR_PTR(-ENODEV);
652 mutex_lock(&buffer->lock);
653 vaddr = ion_handle_kmap_get(handle);
654 mutex_unlock(&buffer->lock);
655 mutex_unlock(&client->lock);
658 EXPORT_SYMBOL(ion_map_kernel);
660 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
662 struct ion_buffer *buffer;
664 mutex_lock(&client->lock);
665 buffer = handle->buffer;
666 mutex_lock(&buffer->lock);
667 ion_handle_kmap_put(handle);
668 mutex_unlock(&buffer->lock);
669 mutex_unlock(&client->lock);
671 EXPORT_SYMBOL(ion_unmap_kernel);
673 static int ion_debug_client_show(struct seq_file *s, void *unused)
675 struct ion_client *client = s->private;
677 size_t sizes[ION_NUM_HEAP_IDS] = {0};
678 const char *names[ION_NUM_HEAP_IDS] = {NULL};
681 mutex_lock(&client->lock);
682 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
683 struct ion_handle *handle = rb_entry(n, struct ion_handle,
685 unsigned int id = handle->buffer->heap->id;
688 names[id] = handle->buffer->heap->name;
689 sizes[id] += handle->buffer->size;
691 mutex_unlock(&client->lock);
693 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
694 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
697 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
702 static int ion_debug_client_open(struct inode *inode, struct file *file)
704 return single_open(file, ion_debug_client_show, inode->i_private);
707 static const struct file_operations debug_client_fops = {
708 .open = ion_debug_client_open,
711 .release = single_release,
714 struct ion_client *ion_client_create(struct ion_device *dev,
717 struct ion_client *client;
718 struct task_struct *task;
720 struct rb_node *parent = NULL;
721 struct ion_client *entry;
724 get_task_struct(current->group_leader);
725 task_lock(current->group_leader);
726 pid = task_pid_nr(current->group_leader);
727 /* don't bother to store task struct for kernel threads,
728 they can't be killed anyway */
729 if (current->group_leader->flags & PF_KTHREAD) {
730 put_task_struct(current->group_leader);
733 task = current->group_leader;
735 task_unlock(current->group_leader);
737 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
740 put_task_struct(current->group_leader);
741 return ERR_PTR(-ENOMEM);
745 client->handles = RB_ROOT;
746 idr_init(&client->idr);
747 mutex_init(&client->lock);
752 down_write(&dev->lock);
753 p = &dev->clients.rb_node;
756 entry = rb_entry(parent, struct ion_client, node);
760 else if (client > entry)
763 rb_link_node(&client->node, parent, p);
764 rb_insert_color(&client->node, &dev->clients);
766 client->debug_root = debugfs_create_file(name, 0664,
767 dev->clients_debug_root,
768 client, &debug_client_fops);
769 if (!client->debug_root) {
770 char buf[256], *path;
771 path = dentry_path(dev->clients_debug_root, buf, 256);
772 pr_err("Failed to create client debugfs at %s/%s\n",
776 up_write(&dev->lock);
780 EXPORT_SYMBOL(ion_client_create);
782 void ion_client_destroy(struct ion_client *client)
784 struct ion_device *dev = client->dev;
787 pr_debug("%s: %d\n", __func__, __LINE__);
788 while ((n = rb_first(&client->handles))) {
789 struct ion_handle *handle = rb_entry(n, struct ion_handle,
791 ion_handle_destroy(&handle->ref);
794 idr_destroy(&client->idr);
796 down_write(&dev->lock);
798 put_task_struct(client->task);
799 rb_erase(&client->node, &dev->clients);
800 debugfs_remove_recursive(client->debug_root);
801 up_write(&dev->lock);
805 EXPORT_SYMBOL(ion_client_destroy);
807 struct sg_table *ion_sg_table(struct ion_client *client,
808 struct ion_handle *handle)
810 struct ion_buffer *buffer;
811 struct sg_table *table;
813 mutex_lock(&client->lock);
814 if (!ion_handle_validate(client, handle)) {
815 pr_err("%s: invalid handle passed to map_dma.\n",
817 mutex_unlock(&client->lock);
818 return ERR_PTR(-EINVAL);
820 buffer = handle->buffer;
821 table = buffer->sg_table;
822 mutex_unlock(&client->lock);
825 EXPORT_SYMBOL(ion_sg_table);
827 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
829 enum dma_data_direction direction);
831 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
832 enum dma_data_direction direction)
834 struct dma_buf *dmabuf = attachment->dmabuf;
835 struct ion_buffer *buffer = dmabuf->priv;
837 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
838 return buffer->sg_table;
841 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
842 struct sg_table *table,
843 enum dma_data_direction direction)
847 void ion_pages_sync_for_device(struct device *dev, struct page *page,
848 size_t size, enum dma_data_direction dir)
850 struct scatterlist sg;
852 sg_init_table(&sg, 1);
853 sg_set_page(&sg, page, size, 0);
855 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
856 * for the targeted device, but this works on the currently targeted hardware.
859 sg_dma_address(&sg) = page_to_phys(page);
860 dma_sync_sg_for_device(dev, &sg, 1, dir);
863 struct ion_vma_list {
864 struct list_head list;
865 struct vm_area_struct *vma;
868 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
870 enum dma_data_direction dir)
872 struct ion_vma_list *vma_list;
873 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
876 pr_debug("%s: syncing for device %s\n", __func__,
877 dev ? dev_name(dev) : "null");
879 if (!ion_buffer_fault_user_mappings(buffer))
882 mutex_lock(&buffer->lock);
883 for (i = 0; i < pages; i++) {
884 struct page *page = buffer->pages[i];
886 if (ion_buffer_page_is_dirty(page))
887 ion_pages_sync_for_device(dev, ion_buffer_page(page),
890 ion_buffer_page_clean(buffer->pages + i);
892 list_for_each_entry(vma_list, &buffer->vmas, list) {
893 struct vm_area_struct *vma = vma_list->vma;
895 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
898 mutex_unlock(&buffer->lock);
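/*
 * The vm_operations below implement the faulting path for such buffers:
 * ion_vm_fault() inserts the backing pfn and tags the page dirty in
 * buffer->pages[], while ion_buffer_sync_for_device() above zaps the user
 * mappings after each device sync so the next CPU access faults again and
 * re-marks only the pages that are actually touched.
 */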
901 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
903 struct ion_buffer *buffer = vma->vm_private_data;
907 mutex_lock(&buffer->lock);
908 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
909 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
911 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
912 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
913 mutex_unlock(&buffer->lock);
915 return VM_FAULT_ERROR;
917 return VM_FAULT_NOPAGE;
920 static void ion_vm_open(struct vm_area_struct *vma)
922 struct ion_buffer *buffer = vma->vm_private_data;
923 struct ion_vma_list *vma_list;
925 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
929 mutex_lock(&buffer->lock);
930 list_add(&vma_list->list, &buffer->vmas);
931 mutex_unlock(&buffer->lock);
932 pr_debug("%s: adding %p\n", __func__, vma);
935 static void ion_vm_close(struct vm_area_struct *vma)
937 struct ion_buffer *buffer = vma->vm_private_data;
938 struct ion_vma_list *vma_list, *tmp;
940 pr_debug("%s\n", __func__);
941 mutex_lock(&buffer->lock);
942 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
943 if (vma_list->vma != vma)
945 list_del(&vma_list->list);
947 pr_debug("%s: deleting %p\n", __func__, vma);
950 mutex_unlock(&buffer->lock);
953 static struct vm_operations_struct ion_vma_ops = {
955 .close = ion_vm_close,
956 .fault = ion_vm_fault,
959 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
961 struct ion_buffer *buffer = dmabuf->priv;
964 if (!buffer->heap->ops->map_user) {
965 pr_err("%s: this heap does not define a method for mapping to userspace\n",
966 __func__);
970 if (ion_buffer_fault_user_mappings(buffer)) {
971 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
973 vma->vm_private_data = buffer;
974 vma->vm_ops = &ion_vma_ops;
979 if (!(buffer->flags & ION_FLAG_CACHED))
980 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
982 mutex_lock(&buffer->lock);
983 /* now map it to userspace */
984 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
985 mutex_unlock(&buffer->lock);
988 pr_err("%s: failure mapping buffer to userspace\n",
994 static void ion_dma_buf_release(struct dma_buf *dmabuf)
996 struct ion_buffer *buffer = dmabuf->priv;
997 ion_buffer_put(buffer);
1000 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1002 struct ion_buffer *buffer = dmabuf->priv;
1003 return buffer->vaddr + offset * PAGE_SIZE;
1006 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1012 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1014 enum dma_data_direction direction)
1016 struct ion_buffer *buffer = dmabuf->priv;
1019 if (!buffer->heap->ops->map_kernel) {
1020 pr_err("%s: map kernel is not implemented by this heap.\n",
1025 mutex_lock(&buffer->lock);
1026 vaddr = ion_buffer_kmap_get(buffer);
1027 mutex_unlock(&buffer->lock);
1028 return PTR_ERR_OR_ZERO(vaddr);
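/*
 * ion_dma_buf_begin_cpu_access() above takes a kernel mapping reference
 * via ion_buffer_kmap_get(); the matching end_cpu_access() below drops it,
 * so calls may nest as long as they stay balanced.
 */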
1031 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1033 enum dma_data_direction direction)
1035 struct ion_buffer *buffer = dmabuf->priv;
1037 mutex_lock(&buffer->lock);
1038 ion_buffer_kmap_put(buffer);
1039 mutex_unlock(&buffer->lock);
1042 static struct dma_buf_ops dma_buf_ops = {
1043 .map_dma_buf = ion_map_dma_buf,
1044 .unmap_dma_buf = ion_unmap_dma_buf,
1046 .release = ion_dma_buf_release,
1047 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1048 .end_cpu_access = ion_dma_buf_end_cpu_access,
1049 .kmap_atomic = ion_dma_buf_kmap,
1050 .kunmap_atomic = ion_dma_buf_kunmap,
1051 .kmap = ion_dma_buf_kmap,
1052 .kunmap = ion_dma_buf_kunmap,
1055 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1056 struct ion_handle *handle)
1058 struct ion_buffer *buffer;
1059 struct dma_buf *dmabuf;
1062 mutex_lock(&client->lock);
1063 valid_handle = ion_handle_validate(client, handle);
1064 if (!valid_handle) {
1065 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1066 mutex_unlock(&client->lock);
1067 return ERR_PTR(-EINVAL);
1069 buffer = handle->buffer;
1070 ion_buffer_get(buffer);
1071 mutex_unlock(&client->lock);
1073 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1074 if (IS_ERR(dmabuf)) {
1075 ion_buffer_put(buffer);
1081 EXPORT_SYMBOL(ion_share_dma_buf);
1083 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1085 struct dma_buf *dmabuf;
1088 dmabuf = ion_share_dma_buf(client, handle);
1090 return PTR_ERR(dmabuf);
1092 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1094 dma_buf_put(dmabuf);
1098 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1100 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1102 struct dma_buf *dmabuf;
1103 struct ion_buffer *buffer;
1104 struct ion_handle *handle;
1107 dmabuf = dma_buf_get(fd);
1109 return ERR_CAST(dmabuf);
1110 /* if this memory came from ion */
1112 if (dmabuf->ops != &dma_buf_ops) {
1113 pr_err("%s: can not import dmabuf from another exporter\n",
1115 dma_buf_put(dmabuf);
1116 return ERR_PTR(-EINVAL);
1118 buffer = dmabuf->priv;
1120 mutex_lock(&client->lock);
1121 /* if a handle exists for this buffer just take a reference to it */
1122 handle = ion_handle_lookup(client, buffer);
1123 if (!IS_ERR(handle)) {
1124 ion_handle_get(handle);
1125 mutex_unlock(&client->lock);
1128 mutex_unlock(&client->lock);
1130 handle = ion_handle_create(client, buffer);
1134 mutex_lock(&client->lock);
1135 ret = ion_handle_add(client, handle);
1136 mutex_unlock(&client->lock);
1138 ion_handle_put(handle);
1139 handle = ERR_PTR(ret);
1143 dma_buf_put(dmabuf);
1146 EXPORT_SYMBOL(ion_import_dma_buf);
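/*
 * Sharing sketch (illustrative only): an exporting client turns a handle
 * into an fd that can be passed to another process or driver, and the
 * receiving client turns that fd back into a handle of its own:
 *
 *	int fd = ion_share_dma_buf_fd(exporter, handle);
 *	...
 *	struct ion_handle *imported = ion_import_dma_buf(importer, fd);
 *
 * If the importing client already holds a handle to the underlying buffer,
 * ion_import_dma_buf() simply takes another reference to that handle.
 */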
1148 static int ion_sync_for_device(struct ion_client *client, int fd)
1150 struct dma_buf *dmabuf;
1151 struct ion_buffer *buffer;
1153 dmabuf = dma_buf_get(fd);
1155 return PTR_ERR(dmabuf);
1157 /* if this memory came from ion */
1158 if (dmabuf->ops != &dma_buf_ops) {
1159 pr_err("%s: can not sync dmabuf from another exporter\n",
1161 dma_buf_put(dmabuf);
1164 buffer = dmabuf->priv;
1166 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1167 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1168 dma_buf_put(dmabuf);
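/*
 * Note: like ion_pages_sync_for_device() above, this relies on the sg
 * dma_addresses having been set to physical addresses at allocation time
 * and on a NULL device being tolerated by dma_sync_sg_for_device() on the
 * platforms ion currently targets.
 */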
1172 /* fix up the cases where the ioctl direction bits are incorrect */
1173 static unsigned int ion_ioctl_dir(unsigned int cmd)
1178 case ION_IOC_CUSTOM:
1181 return _IOC_DIR(cmd);
1185 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1187 struct ion_client *client = filp->private_data;
1188 struct ion_device *dev = client->dev;
1189 struct ion_handle *cleanup_handle = NULL;
1194 struct ion_fd_data fd;
1195 struct ion_allocation_data allocation;
1196 struct ion_handle_data handle;
1197 struct ion_custom_data custom;
1200 dir = ion_ioctl_dir(cmd);
1202 if (_IOC_SIZE(cmd) > sizeof(data))
1205 if (dir & _IOC_WRITE)
1206 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1212 struct ion_handle *handle;
1214 handle = ion_alloc(client, data.allocation.len,
1215 data.allocation.align,
1216 data.allocation.heap_id_mask,
1217 data.allocation.flags);
1219 return PTR_ERR(handle);
1221 data.allocation.handle = handle->id;
1223 cleanup_handle = handle;
1228 struct ion_handle *handle;
1230 handle = ion_handle_get_by_id(client, data.handle.handle);
1232 return PTR_ERR(handle);
1233 ion_free(client, handle);
1234 ion_handle_put(handle);
1240 struct ion_handle *handle;
1242 handle = ion_handle_get_by_id(client, data.handle.handle);
1244 return PTR_ERR(handle);
1245 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1246 ion_handle_put(handle);
1251 case ION_IOC_IMPORT:
1253 struct ion_handle *handle;
1254 handle = ion_import_dma_buf(client, data.fd.fd);
1256 ret = PTR_ERR(handle);
1258 data.handle.handle = handle->id;
1263 ret = ion_sync_for_device(client, data.fd.fd);
1266 case ION_IOC_CUSTOM:
1268 if (!dev->custom_ioctl)
1270 ret = dev->custom_ioctl(client, data.custom.cmd,
1278 if (dir & _IOC_READ) {
1279 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1281 ion_free(client, cleanup_handle);
1288 static int ion_release(struct inode *inode, struct file *file)
1290 struct ion_client *client = file->private_data;
1292 pr_debug("%s: %d\n", __func__, __LINE__);
1293 ion_client_destroy(client);
1297 static int ion_open(struct inode *inode, struct file *file)
1299 struct miscdevice *miscdev = file->private_data;
1300 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1301 struct ion_client *client;
1302 char debug_name[64];
1304 pr_debug("%s: %d\n", __func__, __LINE__);
1305 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1306 client = ion_client_create(dev, debug_name);
1308 return PTR_ERR(client);
1309 file->private_data = client;
1314 static const struct file_operations ion_fops = {
1315 .owner = THIS_MODULE,
1317 .release = ion_release,
1318 .unlocked_ioctl = ion_ioctl,
1319 .compat_ioctl = compat_ion_ioctl,
1322 static size_t ion_debug_heap_total(struct ion_client *client,
1328 mutex_lock(&client->lock);
1329 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1330 struct ion_handle *handle = rb_entry(n,
1333 if (handle->buffer->heap->id == id)
1334 size += handle->buffer->size;
1336 mutex_unlock(&client->lock);
1340 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1342 struct ion_heap *heap = s->private;
1343 struct ion_device *dev = heap->dev;
1345 size_t total_size = 0;
1346 size_t total_orphaned_size = 0;
1348 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1349 seq_printf(s, "----------------------------------------------------\n");
1351 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1352 struct ion_client *client = rb_entry(n, struct ion_client,
1354 size_t size = ion_debug_heap_total(client, heap->id);
1358 char task_comm[TASK_COMM_LEN];
1360 get_task_comm(task_comm, client->task);
1361 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1364 seq_printf(s, "%16s %16u %16zu\n", client->name,
1368 seq_printf(s, "----------------------------------------------------\n");
1369 seq_printf(s, "orphaned allocations (info is from last known client):"
1371 mutex_lock(&dev->buffer_lock);
1372 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1373 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1375 if (buffer->heap->id != heap->id)
1377 total_size += buffer->size;
1378 if (!buffer->handle_count) {
1379 seq_printf(s, "%16s %16u %16zu %d %d\n",
1380 buffer->task_comm, buffer->pid,
1381 buffer->size, buffer->kmap_cnt,
1382 atomic_read(&buffer->ref.refcount));
1383 total_orphaned_size += buffer->size;
1386 mutex_unlock(&dev->buffer_lock);
1387 seq_printf(s, "----------------------------------------------------\n");
1388 seq_printf(s, "%16s %16zu\n", "total orphaned",
1389 total_orphaned_size);
1390 seq_printf(s, "%16s %16zu\n", "total ", total_size);
1391 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1392 seq_printf(s, "%16s %16zu\n", "deferred free",
1393 heap->free_list_size);
1394 seq_printf(s, "----------------------------------------------------\n");
1396 if (heap->debug_show)
1397 heap->debug_show(heap, s, unused);
1402 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1404 return single_open(file, ion_debug_heap_show, inode->i_private);
1407 static const struct file_operations debug_heap_fops = {
1408 .open = ion_debug_heap_open,
1410 .llseek = seq_lseek,
1411 .release = single_release,
1414 #ifdef DEBUG_HEAP_SHRINKER
1415 static int debug_shrink_set(void *data, u64 val)
1417 struct ion_heap *heap = data;
1418 struct shrink_control sc;
1427 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1428 sc.nr_to_scan = objs;
1430 heap->shrinker.shrink(&heap->shrinker, &sc);
1434 static int debug_shrink_get(void *data, u64 *val)
1436 struct ion_heap *heap = data;
1437 struct shrink_control sc;
1443 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1448 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1449 debug_shrink_set, "%llu\n");
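/*
 * With DEBUG_HEAP_SHRINKER defined, each heap that registers a shrinker
 * gets a "<heap name>_shrink" file in the heaps debugfs directory:
 * reading it reports how many objects the shrinker could free, and
 * writing a non-zero value asks the shrinker to drain them.
 */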
1452 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1454 struct dentry *debug_file;
1456 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1457 !heap->ops->unmap_dma)
1458 pr_err("%s: can not add heap with invalid ops struct.\n",
1461 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1462 ion_heap_init_deferred_free(heap);
1465 down_write(&dev->lock);
1466 /* use negative heap->id to reverse the priority -- when traversing
1467 the list later attempt higher id numbers first */
1468 plist_node_init(&heap->node, -heap->id);
1469 plist_add(&heap->node, &dev->heaps);
1470 debug_file = debugfs_create_file(heap->name, 0664,
1471 dev->heaps_debug_root, heap,
1475 char buf[256], *path;
1476 path = dentry_path(dev->heaps_debug_root, buf, 256);
1477 pr_err("Failed to create heap debugfs at %s/%s\n",
1481 #ifdef DEBUG_HEAP_SHRINKER
1482 if (heap->shrinker.shrink) {
1483 char debug_name[64];
1485 snprintf(debug_name, 64, "%s_shrink", heap->name);
1486 debug_file = debugfs_create_file(
1487 debug_name, 0644, dev->heaps_debug_root, heap,
1488 &debug_shrink_fops);
1490 char buf[256], *path;
1491 path = dentry_path(dev->heaps_debug_root, buf, 256);
1492 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1497 up_write(&dev->lock);
1500 struct ion_device *ion_device_create(long (*custom_ioctl)
1501 (struct ion_client *client,
1505 struct ion_device *idev;
1508 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1510 return ERR_PTR(-ENOMEM);
1512 idev->dev.minor = MISC_DYNAMIC_MINOR;
1513 idev->dev.name = "ion";
1514 idev->dev.fops = &ion_fops;
1515 idev->dev.parent = NULL;
1516 ret = misc_register(&idev->dev);
1518 pr_err("ion: failed to register misc device.\n");
1519 return ERR_PTR(ret);
1522 idev->debug_root = debugfs_create_dir("ion", NULL);
1523 if (!idev->debug_root) {
1524 pr_err("ion: failed to create debugfs root directory.\n");
1527 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1528 if (!idev->heaps_debug_root) {
1529 pr_err("ion: failed to create debugfs heaps directory.\n");
1532 idev->clients_debug_root = debugfs_create_dir("clients",
1534 if (!idev->clients_debug_root)
1535 pr_err("ion: failed to create debugfs clients directory.\n");
1539 idev->custom_ioctl = custom_ioctl;
1540 idev->buffers = RB_ROOT;
1541 mutex_init(&idev->buffer_lock);
1542 init_rwsem(&idev->lock);
1543 plist_head_init(&idev->heaps);
1544 idev->clients = RB_ROOT;
1548 void ion_device_destroy(struct ion_device *dev)
1550 misc_deregister(&dev->dev);
1551 debugfs_remove_recursive(dev->debug_root);
1552 /* XXX need to free the heaps and clients ? */
1556 void __init ion_reserve(struct ion_platform_data *data)
1560 for (i = 0; i < data->nr; i++) {
1561 if (data->heaps[i].size == 0)
1564 if (data->heaps[i].base == 0) {
1566 paddr = memblock_alloc_base(data->heaps[i].size,
1567 data->heaps[i].align,
1568 MEMBLOCK_ALLOC_ANYWHERE);
1570 pr_err("%s: error allocating memblock for heap %d\n",
1574 data->heaps[i].base = paddr;
1576 int ret = memblock_reserve(data->heaps[i].base,
1577 data->heaps[i].size);
1579 pr_err("memblock reserve of %zx@%lx failed\n",
1580 data->heaps[i].size,
1581 data->heaps[i].base);
1583 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1584 data->heaps[i].name,
1585 data->heaps[i].base,
1586 data->heaps[i].size);
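/*
 * ion_reserve() is intended to be called from early machine/board init
 * code, before the page allocator owns the memory: heaps with a fixed
 * base are memblock_reserve()d, and heaps without one have a region
 * carved out for them via memblock_alloc_base().
 */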