/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* must be defined before the printk headers are pulled in for pr_debug() */
#define DEBUG

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:                the actual misc device
 * @buffers:            an rb tree of all the existing buffers
 * @lock:               lock protecting the buffers & heaps trees
 * @heaps:              list of all the heaps in the system
 * @custom_ioctl:       optional driver-specific handler for ION_IOC_CUSTOM
 * @clients:            an rb tree of all the clients in the system
 * @debug_root:         root dentry of the device's debugfs directory
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex lock;
        struct rb_root heaps;
        long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
                             unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:               node in the tree of all clients
 * @dev:                backpointer to ion device
 * @handles:            an rb tree of all the handles in this client
 * @lock:               lock protecting the tree of handles
 * @heap_mask:          mask of all supported heaps
 * @name:               used for debugging
 * @task:               used for debugging
 * @pid:                pid of the creating task, used for debugging
 * @debug_root:         dentry of this client's debugfs file
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct mutex lock;
        unsigned int heap_mask;
        const char *name;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:                reference count
 * @client:             back pointer to the client the buffer resides in
 * @buffer:             pointer to the buffer
 * @node:               node in the client's handle rbtree
 * @kmap_cnt:           count of times this client has mapped to kernel
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.\n", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                     struct ion_device *dev,
                                     unsigned long len,
                                     unsigned long align,
                                     unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);
        if (ret) {
                kfree(buffer);
                return ERR_PTR(ret);
        }

        buffer->dev = dev;
        buffer->size = len;

        table = buffer->heap->ops->map_dma(buffer->heap, buffer);
        if (IS_ERR_OR_NULL(table)) {
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(PTR_ERR(table));
        }
        buffer->sg_table = table;

        mutex_init(&buffer->lock);
        /* this will set up dma addresses for the sglist -- it is not
           technically correct as per the dma api -- a specific
           device isn't really taking ownership here.  However, in practice on
           our systems the only dma_address space is physical addresses.
           Additionally, we can't afford the overhead of invalidating every
           allocation via dma_map_sg. The implicit contract here is that
           memory coming from the heaps is ready for dma, ie if it has a
           cached mapping that mapping has been invalidated */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                sg_dma_address(sg) = sg_phys(sg);
        ion_buffer_add(dev, buffer);
        return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_device *dev = buffer->dev;

        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        mutex_lock(&dev->lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->lock);
        kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
                                     struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&client->lock);

        mutex_lock(&buffer->lock);
        /* drop only this handle's kernel mappings, not other handles' */
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);
        mutex_unlock(&client->lock);

        ion_buffer_put(buffer);
        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
        return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n;

        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                if (handle->buffer == buffer)
                        return handle;
        }
        return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
                                struct ion_handle *handle)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
                                                          node);
                if (handle < handle_node)
                        n = n->rb_left;
                else if (handle > handle_node)
                        n = n->rb_right;
                else
                        return true;
        }
        return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle < entry)
                        p = &(*p)->rb_left;
                else if (handle > entry)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: handle already found.\n", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);
}

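/**
 * ion_alloc - allocate a buffer and return a new handle referencing it
 * @client:     the client performing the allocation
 * @len:        size of the request in bytes (rounded up to a whole page)
 * @align:      required alignment, passed through to the heap
 * @flags:      mask of heap ids the caller is willing to allocate from
 *
 * Walks the device's heaps in priority order and allocates from the first
 * heap that is both enabled in the client's heap_mask and requested via
 * @flags.  Returns the handle on success or an ERR_PTR() on failure.
 */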
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int flags)
{
        struct rb_node *n;
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;

        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client, and matches the
         * request of the caller allocate from it.  Repeat until allocate has
         * succeeded or all heaps have been tried
         */
        if (WARN_ON(!len))
                return ERR_PTR(-EINVAL);

        len = PAGE_ALIGN(len);

        mutex_lock(&dev->lock);
        for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
                struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
                /* if the client doesn't support this heap type */
                if (!((1 << heap->type) & client->heap_mask))
                        continue;
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & flags))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                if (!IS_ERR_OR_NULL(buffer))
                        break;
        }
        mutex_unlock(&dev->lock);

        if (buffer == NULL)
                return ERR_PTR(-ENODEV);

        if (IS_ERR(buffer))
                return ERR_PTR(PTR_ERR(buffer));

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (!IS_ERR(handle)) {
                mutex_lock(&client->lock);
                ion_handle_add(client, handle);
                mutex_unlock(&client->lock);
        }

        return handle;
}

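/**
 * ion_free - free a handle previously returned by ion_alloc()
 * @client:     the client that owns @handle
 * @handle:     the handle to free
 *
 * Drops the client's reference; the underlying buffer is released once the
 * last handle and any exported dma-bufs are gone.
 */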
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        bool valid_handle;

        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                return;
        }
        ion_handle_put(handle);
}

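/**
 * ion_phys - look up the physical address and length of a buffer
 * @client:     the client that owns @handle
 * @handle:     the handle to the buffer
 * @addr:       out parameter for the physical address
 * @len:        out parameter for the buffer length
 *
 * Only works for heaps that implement the phys() op (typically heaps backed
 * by physically contiguous memory); returns -ENODEV otherwise.
 */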
int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len)
{
        struct ion_buffer *buffer;
        int ret;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                mutex_unlock(&client->lock);
                return -EINVAL;
        }

        buffer = handle->buffer;

        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return -ENODEV;
        }
        mutex_unlock(&client->lock);
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        return ret;
}

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

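/**
 * ion_map_kernel - map a buffer into the kernel and return the address
 * @client:     the client that owns @handle
 * @handle:     the handle to the buffer
 *
 * Mappings are reference counted per handle and per buffer, so repeated
 * calls return the same address until they are balanced by
 * ion_unmap_kernel().
 */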
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}

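/**
 * ion_unmap_kernel - drop a kernel mapping taken with ion_map_kernel()
 * @client:     the client that owns @handle
 * @handle:     the handle whose mapping is released
 */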
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}

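/*
 * Typical in-kernel usage of the client API (illustrative sketch only --
 * "my_ion_dev", "my_heap_id" and the omitted error handling are
 * placeholders, not part of this file):
 *
 *      client = ion_client_create(my_ion_dev, -1, "example-driver");
 *      handle = ion_alloc(client, SZ_4K, PAGE_SIZE, 1 << my_heap_id);
 *      vaddr  = ion_map_kernel(client, handle);
 *      ... use vaddr ...
 *      ion_unmap_kernel(client, handle);
 *      ion_free(client, handle);
 *      ion_client_destroy(client);
 */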
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAPS] = {0};
        const char *names[ION_NUM_HEAPS] = {0};
        int i;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                enum ion_heap_type type = handle->buffer->heap->type;

                if (!names[type])
                        names[type] = handle->buffer->heap->name;
                sizes[type] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);

        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAPS; i++) {
                if (!names[i])
                        continue;
                seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
        }
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

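/**
 * ion_client_create - create a client and add it to the device
 * @dev:        the ion device the client will allocate from
 * @heap_mask:  mask of heap types this client may allocate from
 * @name:       used for debugging
 *
 * Also creates a debugfs entry, named after the caller's pid, under the
 * device's debug directory.  Returns the client or an ERR_PTR() on failure.
 */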
struct ion_client *ion_client_create(struct ion_device *dev,
                                     unsigned int heap_mask,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        char debug_name[64];
        pid_t pid;

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /* don't bother to store task struct for kernel threads,
           they can't be killed anyway */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
        if (!client) {
                if (task)
                        put_task_struct(current->group_leader);
                return ERR_PTR(-ENOMEM);
        }

        client->dev = dev;
        client->handles = RB_ROOT;
        mutex_init(&client->lock);
        client->name = name;
        client->heap_mask = heap_mask;
        client->task = task;
        client->pid = pid;

        mutex_lock(&dev->lock);
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        snprintf(debug_name, 64, "%d", client->pid);
        client->debug_root = debugfs_create_file(debug_name, 0664,
                                                 dev->debug_root, client,
                                                 &debug_client_fops);
        mutex_unlock(&dev->lock);

        return client;
}

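/**
 * ion_client_destroy - destroy all of a client's handles and free the client
 * @client:     the client to tear down
 */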
void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }
        mutex_lock(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        mutex_unlock(&dev->lock);

        kfree(client);
}

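/**
 * ion_sg_table - return the sg_table describing a buffer's memory
 * @client:     the client that owns @handle
 * @handle:     the handle to the buffer
 *
 * The table belongs to the buffer and stays valid for the buffer's
 * lifetime; callers must not free it.
 */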
struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct sg_table *table;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_dma.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        table = buffer->sg_table;
        mutex_unlock(&client->lock);
        return table;
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;

        return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;

        /* the dma-buf kmap interface passes a page offset, not a byte offset */
        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
                                        size_t len,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
        if (!vaddr)
                return -ENOMEM;
        return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len,
                                       enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};

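/**
 * ion_share_dma_buf - export a buffer as a dma-buf and return a new fd
 * @client:     the client that owns @handle
 * @handle:     the handle to share
 *
 * Takes an extra reference on the underlying buffer which is dropped when
 * the dma-buf itself is released.  Returns the fd or a negative errno.
 */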
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;
        int fd;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                return -EINVAL;
        }

        buffer = handle->buffer;
        ion_buffer_get(buffer);
        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return PTR_ERR(dmabuf);
        }
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                /* the dma-buf's release callback drops the buffer reference */
                dma_buf_put(dmabuf);

        return fd;
}

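/**
 * ion_import_dma_buf - turn a dma-buf fd exported by ion back into a handle
 * @client:     the client to attach the handle to
 * @fd:         the dma-buf file descriptor
 *
 * Only dma-bufs exported by ion itself can be imported.  If the client
 * already holds a handle for the underlying buffer, that handle is reused
 * with an extra reference instead of creating a new one.
 */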
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR_OR_NULL(handle)) {
                ion_handle_get(handle);
                goto end;
        }
        handle = ion_handle_create(client, buffer);
        if (IS_ERR_OR_NULL(handle))
                goto end;
        ion_handle_add(client, handle);
end:
        mutex_unlock(&client->lock);
        dma_buf_put(dmabuf);
        return handle;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_allocation_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.handle = ion_alloc(client, data.len, data.align,
                                        data.flags);

                if (IS_ERR(data.handle))
                        return PTR_ERR(data.handle);

                if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                        ion_free(client, data.handle);
                        return -EFAULT;
                }
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle_data data;
                bool valid;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_handle_data)))
                        return -EFAULT;
                mutex_lock(&client->lock);
                valid = ion_handle_validate(client, data.handle);
                mutex_unlock(&client->lock);
                if (!valid)
                        return -EINVAL;
                ion_free(client, data.handle);
                break;
        }
        case ION_IOC_SHARE:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.fd = ion_share_dma_buf(client, data.handle);
                if (copy_to_user((void __user *)arg, &data, sizeof(data)))
                        return -EFAULT;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                data.handle = ion_import_dma_buf(client, data.fd);
                if (IS_ERR(data.handle))
                        data.handle = NULL;
                if (copy_to_user((void __user *)arg, &data,
                                 sizeof(struct ion_fd_data)))
                        return -EFAULT;
                break;
        }
        case ION_IOC_CUSTOM:
        {
                struct ion_device *dev = client->dev;
                struct ion_custom_data data;

                if (!dev->custom_ioctl)
                        return -ENOTTY;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_custom_data)))
                        return -EFAULT;
                return dev->custom_ioctl(client, data.cmd, data.arg);
        }
        default:
                return -ENOTTY;
        }
        return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;

        pr_debug("%s: %d\n", __func__, __LINE__);
        client = ion_client_create(dev, -1, "user");
        if (IS_ERR_OR_NULL(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner          = THIS_MODULE,
        .open           = ion_open,
        .release        = ion_release,
        .unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
                                   enum ion_heap_type type)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->type == type)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
        struct ion_heap *heap = s->private;
        struct ion_device *dev = heap->dev;
        struct rb_node *n;

        seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->type);

                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];

                        get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16s %16u %16zu\n", task_comm,
                                   client->pid, size);
                } else {
                        seq_printf(s, "%16s %16u %16zu\n", client->name,
                                   client->pid, size);
                }
        }
        return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
        .open = ion_debug_heap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

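/**
 * ion_device_add_heap - register a heap with an ion device
 * @dev:        the device to add the heap to
 * @heap:       the heap, with its id and ops already filled in
 *
 * Heaps are kept in an rbtree keyed by id, so adding a second heap with an
 * id that is already registered is rejected.  A debugfs file named after
 * the heap is created under the device's debug directory.
 */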
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        struct rb_node **p = &dev->heaps.rb_node;
        struct rb_node *parent = NULL;
        struct ion_heap *entry;

        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        heap->dev = dev;
        mutex_lock(&dev->lock);
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_heap, node);

                if (heap->id < entry->id) {
                        p = &(*p)->rb_left;
                } else if (heap->id > entry->id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: can not insert multiple heaps with id %d\n",
                               __func__, heap->id);
                        goto end;
                }
        }

        rb_link_node(&heap->node, parent, p);
        rb_insert_color(&heap->node, &dev->heaps);
        debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
                            &debug_heap_fops);
end:
        mutex_unlock(&dev->lock);
}

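/**
 * ion_device_create - create an ion device and register its misc char device
 * @custom_ioctl:       optional handler invoked for ION_IOC_CUSTOM ioctls
 *
 * Returns the new device on success or an ERR_PTR() on failure.
 */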
struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (IS_ERR_OR_NULL(idev->debug_root))
                pr_err("ion: failed to create debug files.\n");

        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->lock);
        idev->heaps = RB_ROOT;
        idev->clients = RB_ROOT;
        return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        /* XXX need to free the heaps and clients ? */
        kfree(dev);
}

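/**
 * ion_reserve - reserve memory at boot for heaps with static base addresses
 * @data:       platform data describing the heaps
 *
 * Heaps with a zero size are skipped; a failed memblock_reserve() is
 * logged but is not treated as fatal.
 */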
void __init ion_reserve(struct ion_platform_data *data)
{
        int i, ret;

        for (i = 0; i < data->nr; i++) {
                if (data->heaps[i].size == 0)
                        continue;
                ret = memblock_reserve(data->heaps[i].base,
                                       data->heaps[i].size);
                if (ret)
                        pr_err("memblock reserve of %zx@%lx failed\n",
                               data->heaps[i].size,
                               data->heaps[i].base);
        }
}