/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		rb tree of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl hook
 * @clients:		rb tree of all the clients created against this device
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client task, used for debugging
 * @debug_root:		debugfs root file for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying
 * either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);
		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	/*
	 * This sets up the dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific device
	 * isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}
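/*
 * Called when the last reference to a buffer is dropped: undo any leftover
 * kernel mapping, release the dma mapping and the backing memory to the
 * heap, and remove the buffer from the device's rb tree.
 */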
static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	while (buffer->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}
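/*
 * Check that @handle really belongs to @client by searching the client's
 * rb tree of handles.  The tree is keyed on the handle pointer itself,
 * matching the ordering used by ion_handle_add() below.
 */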
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}
static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);
		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until allocate
	 * has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);
	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
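/*
 * A minimal sketch of kernel-side usage (the heap id used to build the
 * flags mask is platform-specific; id 0 is assumed here for illustration):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_4K, SZ_4K, 1 << 0);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */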
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);
	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
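/*
 * Kernel mappings are refcounted at two levels: ion_buffer_kmap_get()
 * counts users per buffer and only calls the heap's map_kernel op for the
 * first one, while ion_handle_kmap_get() counts how many of those mappings
 * each client handle holds.  Callers must hold buffer->lock.
 */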
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
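/* debugfs: per-client file reporting the bytes held in each heap type */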
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
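/*
 * Create a client against @dev.  @heap_mask restricts which heap types the
 * client may allocate from (ion_open() passes -1, i.e. all heaps, for
 * userspace clients); @name appears only in debugfs output.
 */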
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	char debug_name[64];

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);
		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
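/*
 * Return the sg_table backing @handle's buffer.  The table is owned by the
 * buffer and stays valid for the buffer's lifetime; callers must not free it.
 */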
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);
	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	/* the dma-buf kmap offset is in pages, not bytes */
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
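/*
 * ion's dma-buf vtable: map_dma_buf hands out the sg_table built at
 * allocation time, and the CPU access hooks reuse the kmap refcounting
 * above.  The atomic and non-atomic kmap slots share one implementation
 * because the whole buffer is already mapped once CPU access begins.
 */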
static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		/* the dmabuf's release op drops the buffer reference */
		dma_buf_put(dmabuf);

	return fd;
}
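/*
 * Import an ion buffer from a dma-buf fd.  Only dmabufs exported by ion
 * itself can be imported, since ion needs the ion_buffer stashed in
 * dmabuf->priv; buffers from other exporters are rejected.
 */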
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
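/*
 * Userspace entry point: each command copies its argument struct from
 * userspace, operates on the client bound to this file, and copies the
 * (possibly updated) struct back.
 */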
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}
static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
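/*
 * Heaps live in an rb tree keyed by id, and ion_alloc() walks that tree
 * in order with rb_first()/rb_next(), so lower heap ids are tried first;
 * this is how platforms express allocation priority.
 */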
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);
		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
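/*
 * Reserve the carveout regions described in the platform data via
 * memblock at early init, before the page allocator can claim them;
 * zero-sized heaps are skipped.
 */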
void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}