/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients in the system
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}
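
/*
 * A buffer that is cached but does not need explicit sync is mapped into
 * userspace one page at a time via ion_vm_fault() below; faulting lets
 * the driver track per-page dirty state so only dirty pages need to be
 * synced before a device access.
 */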
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
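
/*
 * The entries of buffer->pages double as a per-page dirty flag: struct
 * page pointers are at least word aligned, so bit 0 is always clear and
 * can be borrowed as a tag.  ion_buffer_page_dirty() sets the bit,
 * ion_buffer_page_is_dirty() tests it, and ion_buffer_page() strips it
 * to recover the real pointer before the page is used.
 */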
/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice
	 * on our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}
static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}
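
/*
 * Buffer lifetime: every handle holds a reference (taken in
 * ion_handle_create()), and every exported dma_buf holds one (taken in
 * ion_share_dma_buf()).  When the last reference is dropped the buffer
 * is either destroyed immediately or, for heaps with
 * ION_HEAP_FLAG_DEFER_FREE, pushed onto the heap's freelist.
 */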
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle,
						    node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}
static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocate has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
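
/*
 * A typical in-kernel use of the pair above (illustrative only; "client"
 * and "handle" come from ion_client_create() and ion_alloc()):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... read or write the buffer through vaddr ...
 *	ion_unmap_kernel(client, handle);
 */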
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}
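
/*
 * The fault handler inserts the PFN itself and records the page as dirty
 * in buffer->pages, so it returns VM_FAULT_NOPAGE to tell the mm layer
 * that no further page-table setup is needed.
 */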
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}
static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
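
/*
 * Sharing round trip (illustrative only): one client exports a buffer as
 * a dma_buf fd and another imports it, ending up with a handle to the
 * same underlying ion_buffer:
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
 */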
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
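
/*
 * Note that the device argument to dma_sync_sg_for_device() above is
 * NULL; like the sg_dma_address fixup in ion_buffer_create(), this leans
 * on dma addresses being plain physical addresses on the systems ion
 * currently targets.
 */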
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
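
/*
 * ION_IOC_SYNC, ION_IOC_FREE and ION_IOC_CUSTOM are declared _IOWR in the
 * uapi header even though the kernel only consumes their argument, so
 * they are treated as write-only here to skip a needless copy back out
 * to userspace.
 */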
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);

		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}
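
/*
 * Example of the resulting ordering: heaps with ids 0, 2 and 5 get plist
 * priorities 0, -2 and -5; the plist sorts ascending, so ion_alloc()
 * tries the id-5 heap first and the id-0 heap last.
 */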
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}