1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38
39 #include "ion.h"
40 #include "ion_priv.h"
41 #include "compat_ion.h"
42
43 /**
44  * struct ion_device - the metadata of the ion device node
45  * @dev:                the actual misc device
46  * @buffers:            an rb tree of all the existing buffers
47  * @buffer_lock:        lock protecting the tree of buffers
48  * @lock:               rwsem protecting the tree of heaps and clients
49  * @heaps:              list of all the heaps in the system
50  * @clients:            an rb tree of all the existing clients
51  */
52 struct ion_device {
53         struct miscdevice dev;
54         struct rb_root buffers;
55         struct mutex buffer_lock;
56         struct rw_semaphore lock;
57         struct plist_head heaps;
58         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
59                               unsigned long arg);
60         struct rb_root clients;
61         struct dentry *debug_root;
62 };
63
64 /**
65  * struct ion_client - a process/hw block local address space
66  * @node:               node in the tree of all clients
67  * @dev:                backpointer to ion device
68  * @handles:            an rb tree of all the handles in this client
69  * @idr:                an idr space for allocating handle ids
70  * @lock:               lock protecting the tree of handles
71  * @name:               used for debugging
72  * @task:               used for debugging
73  *
74  * A client represents a list of buffers this client may access.
75  * The mutex stored here is used to protect both the handles tree
76  * and the handles themselves, and should be held while modifying either.
77  */
78 struct ion_client {
79         struct rb_node node;
80         struct ion_device *dev;
81         struct rb_root handles;
82         struct idr idr;
83         struct mutex lock;
84         const char *name;
85         struct task_struct *task;
86         pid_t pid;
87         struct dentry *debug_root;
88 };
89
90 /**
91  * struct ion_handle - a client local reference to a buffer
92  * @ref:                reference count
93  * @client:             back pointer to the client the buffer resides in
94  * @buffer:             pointer to the buffer
95  * @node:               node in the client's handle rbtree
96  * @kmap_cnt:           count of times this client has mapped to kernel
97  * @id:                 client-unique id allocated by client->idr
98  *
99  * Modifications to node, kmap_cnt or mapping should be protected by the
100  * lock in the client.  Other fields are never changed after initialization.
101  */
102 struct ion_handle {
103         struct kref ref;
104         struct ion_client *client;
105         struct ion_buffer *buffer;
106         struct rb_node node;
107         unsigned int kmap_cnt;
108         int id;
109 };
110
111 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
112 {
113         return (buffer->flags & ION_FLAG_CACHED) &&
114                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
115 }
116
117 bool ion_buffer_cached(struct ion_buffer *buffer)
118 {
119         return !!(buffer->flags & ION_FLAG_CACHED);
120 }
121
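/*
 * The low bit of each buffer->pages[] entry doubles as a dirty flag for
 * buffers that fault in their user mappings: ion_buffer_page() masks the
 * bit off to recover the real struct page pointer, while the dirty/clean
 * helpers below set and clear it.  ion_buffer_sync_for_device() later
 * syncs only the pages marked dirty.
 */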
122 static inline struct page *ion_buffer_page(struct page *page)
123 {
124         return (struct page *)((unsigned long)page & ~(1UL));
125 }
126
127 static inline bool ion_buffer_page_is_dirty(struct page *page)
128 {
129         return !!((unsigned long)page & 1UL);
130 }
131
132 static inline void ion_buffer_page_dirty(struct page **page)
133 {
134         *page = (struct page *)((unsigned long)(*page) | 1UL);
135 }
136
137 static inline void ion_buffer_page_clean(struct page **page)
138 {
139         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
140 }
141
142 /* this function should only be called while dev->lock is held */
143 static void ion_buffer_add(struct ion_device *dev,
144                            struct ion_buffer *buffer)
145 {
146         struct rb_node **p = &dev->buffers.rb_node;
147         struct rb_node *parent = NULL;
148         struct ion_buffer *entry;
149
150         while (*p) {
151                 parent = *p;
152                 entry = rb_entry(parent, struct ion_buffer, node);
153
154                 if (buffer < entry) {
155                         p = &(*p)->rb_left;
156                 } else if (buffer > entry) {
157                         p = &(*p)->rb_right;
158                 } else {
159                         pr_err("%s: buffer already found.\n", __func__);
160                         BUG();
161                 }
162         }
163
164         rb_link_node(&buffer->node, parent, p);
165         rb_insert_color(&buffer->node, &dev->buffers);
166 }
167
168 /* this function should only be called while dev->lock is held */
169 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
170                                      struct ion_device *dev,
171                                      unsigned long len,
172                                      unsigned long align,
173                                      unsigned long flags)
174 {
175         struct ion_buffer *buffer;
176         struct sg_table *table;
177         struct scatterlist *sg;
178         int i, ret;
179
180         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
181         if (!buffer)
182                 return ERR_PTR(-ENOMEM);
183
184         buffer->heap = heap;
185         buffer->flags = flags;
186         kref_init(&buffer->ref);
187
188         ret = heap->ops->allocate(heap, buffer, len, align, flags);
189
190         if (ret) {
191                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
192                         goto err2;
193
194                 ion_heap_freelist_drain(heap, 0);
195                 ret = heap->ops->allocate(heap, buffer, len, align,
196                                           flags);
197                 if (ret)
198                         goto err2;
199         }
200
201         buffer->dev = dev;
202         buffer->size = len;
203
204         table = heap->ops->map_dma(heap, buffer);
205         if (WARN_ONCE(table == NULL,
206                         "heap->ops->map_dma should return ERR_PTR on error"))
207                 table = ERR_PTR(-EINVAL);
208         if (IS_ERR(table)) {
209                 heap->ops->free(buffer);
210                 kfree(buffer);
211                 return ERR_CAST(table);
212         }
213         buffer->sg_table = table;
214         if (ion_buffer_fault_user_mappings(buffer)) {
215                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
216                 struct scatterlist *sg;
217                 int i, j, k = 0;
218
219                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
220                 if (!buffer->pages) {
221                         ret = -ENOMEM;
222                         goto err1;
223                 }
224
225                 for_each_sg(table->sgl, sg, table->nents, i) {
226                         struct page *page = sg_page(sg);
227
228                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
229                                 buffer->pages[k++] = page++;
230                 }
231
232                 if (ret)
233                         goto err;
234         }
235
236         buffer->dev = dev;
237         buffer->size = len;
238         INIT_LIST_HEAD(&buffer->vmas);
239         mutex_init(&buffer->lock);
240         /* this will set up dma addresses for the sglist -- it is not
241            technically correct as per the dma api -- a specific
242            device isn't really taking ownership here.  However, in practice on
243            our systems the only dma_address space is physical addresses.
244            Additionally, we can't afford the overhead of invalidating every
245            allocation via dma_map_sg. The implicit contract here is that
246            memory coming from the heaps is ready for dma, i.e. if it has a
247            cached mapping that mapping has been invalidated */
248         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
249                 sg_dma_address(sg) = sg_phys(sg);
250         mutex_lock(&dev->buffer_lock);
251         ion_buffer_add(dev, buffer);
252         mutex_unlock(&dev->buffer_lock);
253         return buffer;
254
255 err:
256         heap->ops->unmap_dma(heap, buffer);
257         heap->ops->free(buffer);
258 err1:
259         if (buffer->pages)
260                 vfree(buffer->pages);
261 err2:
262         kfree(buffer);
263         return ERR_PTR(ret);
264 }
265
266 void ion_buffer_destroy(struct ion_buffer *buffer)
267 {
268         if (WARN_ON(buffer->kmap_cnt > 0))
269                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
270         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
271         buffer->heap->ops->free(buffer);
272         if (buffer->pages)
273                 vfree(buffer->pages);
274         kfree(buffer);
275 }
276
277 static void _ion_buffer_destroy(struct kref *kref)
278 {
279         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
280         struct ion_heap *heap = buffer->heap;
281         struct ion_device *dev = buffer->dev;
282
283         mutex_lock(&dev->buffer_lock);
284         rb_erase(&buffer->node, &dev->buffers);
285         mutex_unlock(&dev->buffer_lock);
286
287         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
288                 ion_heap_freelist_add(heap, buffer);
289         else
290                 ion_buffer_destroy(buffer);
291 }
292
293 static void ion_buffer_get(struct ion_buffer *buffer)
294 {
295         kref_get(&buffer->ref);
296 }
297
298 static int ion_buffer_put(struct ion_buffer *buffer)
299 {
300         return kref_put(&buffer->ref, _ion_buffer_destroy);
301 }
302
303 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
304 {
305         mutex_lock(&buffer->lock);
306         buffer->handle_count++;
307         mutex_unlock(&buffer->lock);
308 }
309
310 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
311 {
312         /*
313          * when a buffer is removed from a handle, if it is not in
314          * any other handles, copy the taskcomm and the pid of the
315          * process it's being removed from into the buffer.  At this
316          * point there will be no way to track what processes this buffer is
317          * being used by, it only exists as a dma_buf file descriptor.
318          * The taskcomm and pid can provide a debug hint as to where this fd
319          * is in the system
320          */
321         mutex_lock(&buffer->lock);
322         buffer->handle_count--;
323         BUG_ON(buffer->handle_count < 0);
324         if (!buffer->handle_count) {
325                 struct task_struct *task;
326
327                 task = current->group_leader;
328                 get_task_comm(buffer->task_comm, task);
329                 buffer->pid = task_pid_nr(task);
330         }
331         mutex_unlock(&buffer->lock);
332 }
333
334 static struct ion_handle *ion_handle_create(struct ion_client *client,
335                                      struct ion_buffer *buffer)
336 {
337         struct ion_handle *handle;
338
339         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
340         if (!handle)
341                 return ERR_PTR(-ENOMEM);
342         kref_init(&handle->ref);
343         RB_CLEAR_NODE(&handle->node);
344         handle->client = client;
345         ion_buffer_get(buffer);
346         ion_buffer_add_to_handle(buffer);
347         handle->buffer = buffer;
348
349         return handle;
350 }
351
352 static void ion_handle_kmap_put(struct ion_handle *);
353
354 static void ion_handle_destroy(struct kref *kref)
355 {
356         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
357         struct ion_client *client = handle->client;
358         struct ion_buffer *buffer = handle->buffer;
359
360         mutex_lock(&buffer->lock);
361         while (handle->kmap_cnt)
362                 ion_handle_kmap_put(handle);
363         mutex_unlock(&buffer->lock);
364
365         idr_remove(&client->idr, handle->id);
366         if (!RB_EMPTY_NODE(&handle->node))
367                 rb_erase(&handle->node, &client->handles);
368
369         ion_buffer_remove_from_handle(buffer);
370         ion_buffer_put(buffer);
371
372         kfree(handle);
373 }
374
375 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
376 {
377         return handle->buffer;
378 }
379
380 static void ion_handle_get(struct ion_handle *handle)
381 {
382         kref_get(&handle->ref);
383 }
384
385 static int ion_handle_put(struct ion_handle *handle)
386 {
387         struct ion_client *client = handle->client;
388         int ret;
389
390         mutex_lock(&client->lock);
391         ret = kref_put(&handle->ref, ion_handle_destroy);
392         mutex_unlock(&client->lock);
393
394         return ret;
395 }
396
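/*
 * Walk the client's handle rbtree, which is keyed by buffer address.
 * The caller must hold client->lock.
 */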
397 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
398                                             struct ion_buffer *buffer)
399 {
400         struct rb_node *n = client->handles.rb_node;
401
402         while (n) {
403                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
404                 if (buffer < entry->buffer)
405                         n = n->rb_left;
406                 else if (buffer > entry->buffer)
407                         n = n->rb_right;
408                 else
409                         return entry;
410         }
411         return ERR_PTR(-EINVAL);
412 }
413
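/*
 * Look up a handle by its idr id and take a reference to it under
 * client->lock.  The caller is responsible for dropping the reference
 * with ion_handle_put().
 */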
414 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
415                                                 int id)
416 {
417         struct ion_handle *handle;
418
419         mutex_lock(&client->lock);
420         handle = idr_find(&client->idr, id);
421         if (handle)
422                 ion_handle_get(handle);
423         mutex_unlock(&client->lock);
424
425         return handle ? handle : ERR_PTR(-EINVAL);
426 }
427
428 static bool ion_handle_validate(struct ion_client *client,
429                                 struct ion_handle *handle)
430 {
431         WARN_ON(!mutex_is_locked(&client->lock));
432         return (idr_find(&client->idr, handle->id) == handle);
433 }
434
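/*
 * Allocate an id for the handle from the client's idr and insert the
 * handle into the client's rbtree, which is kept sorted by buffer
 * address.  Called with client->lock held.
 */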
435 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
436 {
437         int id;
438         struct rb_node **p = &client->handles.rb_node;
439         struct rb_node *parent = NULL;
440         struct ion_handle *entry;
441
442         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
443         if (id < 0)
444                 return id;
445
446         handle->id = id;
447
448         while (*p) {
449                 parent = *p;
450                 entry = rb_entry(parent, struct ion_handle, node);
451
452                 if (handle->buffer < entry->buffer)
453                         p = &(*p)->rb_left;
454                 else if (handle->buffer > entry->buffer)
455                         p = &(*p)->rb_right;
456                 else
457                         WARN(1, "%s: buffer already found.\n", __func__);
458         }
459
460         rb_link_node(&handle->node, parent, p);
461         rb_insert_color(&handle->node, &client->handles);
462
463         return 0;
464 }
465
466 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
467                              size_t align, unsigned int heap_id_mask,
468                              unsigned int flags)
469 {
470         struct ion_handle *handle;
471         struct ion_device *dev = client->dev;
472         struct ion_buffer *buffer = NULL;
473         struct ion_heap *heap;
474         int ret;
475
476         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
477                  len, align, heap_id_mask, flags);
478         /*
479          * traverse the list of heaps available in this system in priority
480          * order.  If the heap type is supported by the client and matches the
481          * request of the caller, allocate from it.  Repeat until the allocation
482          * has succeeded or all heaps have been tried.
483          */
484         len = PAGE_ALIGN(len);
485
486         if (!len)
487                 return ERR_PTR(-EINVAL);
488
489         down_read(&dev->lock);
490         plist_for_each_entry(heap, &dev->heaps, node) {
491                 /* if the caller didn't specify this heap id */
492                 if (!((1 << heap->id) & heap_id_mask))
493                         continue;
494                 buffer = ion_buffer_create(heap, dev, len, align, flags);
495                 if (!IS_ERR(buffer))
496                         break;
497         }
498         up_read(&dev->lock);
499
500         if (buffer == NULL)
501                 return ERR_PTR(-ENODEV);
502
503         if (IS_ERR(buffer))
504                 return ERR_CAST(buffer);
505
506         handle = ion_handle_create(client, buffer);
507
508         /*
509          * ion_buffer_create will create a buffer with a ref_cnt of 1,
510          * and ion_handle_create will take a second reference, drop one here
511          */
512         ion_buffer_put(buffer);
513
514         if (IS_ERR(handle))
515                 return handle;
516
517         mutex_lock(&client->lock);
518         ret = ion_handle_add(client, handle);
519         mutex_unlock(&client->lock);
520         if (ret) {
521                 ion_handle_put(handle);
522                 handle = ERR_PTR(ret);
523         }
524
525         return handle;
526 }
527 EXPORT_SYMBOL(ion_alloc);
528
529 void ion_free(struct ion_client *client, struct ion_handle *handle)
530 {
531         bool valid_handle;
532
533         BUG_ON(client != handle->client);
534
535         mutex_lock(&client->lock);
536         valid_handle = ion_handle_validate(client, handle);
537
538         if (!valid_handle) {
539                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
540                 mutex_unlock(&client->lock);
541                 return;
542         }
543         mutex_unlock(&client->lock);
544         ion_handle_put(handle);
545 }
546 EXPORT_SYMBOL(ion_free);
547
548 int ion_phys(struct ion_client *client, struct ion_handle *handle,
549              ion_phys_addr_t *addr, size_t *len)
550 {
551         struct ion_buffer *buffer;
552         int ret;
553
554         mutex_lock(&client->lock);
555         if (!ion_handle_validate(client, handle)) {
556                 mutex_unlock(&client->lock);
557                 return -EINVAL;
558         }
559
560         buffer = handle->buffer;
561
562         if (!buffer->heap->ops->phys) {
563                 pr_err("%s: ion_phys is not implemented by this heap.\n",
564                        __func__);
565                 mutex_unlock(&client->lock);
566                 return -ENODEV;
567         }
568         mutex_unlock(&client->lock);
569         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
570         return ret;
571 }
572 EXPORT_SYMBOL(ion_phys);
573
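/*
 * Kernel mappings of a buffer are reference counted: the buffer is mapped
 * on the first ion_buffer_kmap_get() and unmapped again when the count
 * drops to zero in ion_buffer_kmap_put().  buffer->lock must be held.
 */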
574 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
575 {
576         void *vaddr;
577
578         if (buffer->kmap_cnt) {
579                 buffer->kmap_cnt++;
580                 return buffer->vaddr;
581         }
582         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
583         if (WARN_ONCE(vaddr == NULL,
584                         "heap->ops->map_kernel should return ERR_PTR on error"))
585                 return ERR_PTR(-EINVAL);
586         if (IS_ERR(vaddr))
587                 return vaddr;
588         buffer->vaddr = vaddr;
589         buffer->kmap_cnt++;
590         return vaddr;
591 }
592
593 static void *ion_handle_kmap_get(struct ion_handle *handle)
594 {
595         struct ion_buffer *buffer = handle->buffer;
596         void *vaddr;
597
598         if (handle->kmap_cnt) {
599                 handle->kmap_cnt++;
600                 return buffer->vaddr;
601         }
602         vaddr = ion_buffer_kmap_get(buffer);
603         if (IS_ERR(vaddr))
604                 return vaddr;
605         handle->kmap_cnt++;
606         return vaddr;
607 }
608
609 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
610 {
611         buffer->kmap_cnt--;
612         if (!buffer->kmap_cnt) {
613                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
614                 buffer->vaddr = NULL;
615         }
616 }
617
618 static void ion_handle_kmap_put(struct ion_handle *handle)
619 {
620         struct ion_buffer *buffer = handle->buffer;
621
622         handle->kmap_cnt--;
623         if (!handle->kmap_cnt)
624                 ion_buffer_kmap_put(buffer);
625 }
626
627 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
628 {
629         struct ion_buffer *buffer;
630         void *vaddr;
631
632         mutex_lock(&client->lock);
633         if (!ion_handle_validate(client, handle)) {
634                 pr_err("%s: invalid handle passed to map_kernel.\n",
635                        __func__);
636                 mutex_unlock(&client->lock);
637                 return ERR_PTR(-EINVAL);
638         }
639
640         buffer = handle->buffer;
641
642         if (!handle->buffer->heap->ops->map_kernel) {
643                 pr_err("%s: map_kernel is not implemented by this heap.\n",
644                        __func__);
645                 mutex_unlock(&client->lock);
646                 return ERR_PTR(-ENODEV);
647         }
648
649         mutex_lock(&buffer->lock);
650         vaddr = ion_handle_kmap_get(handle);
651         mutex_unlock(&buffer->lock);
652         mutex_unlock(&client->lock);
653         return vaddr;
654 }
655 EXPORT_SYMBOL(ion_map_kernel);
656
657 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
658 {
659         struct ion_buffer *buffer;
660
661         mutex_lock(&client->lock);
662         buffer = handle->buffer;
663         mutex_lock(&buffer->lock);
664         ion_handle_kmap_put(handle);
665         mutex_unlock(&buffer->lock);
666         mutex_unlock(&client->lock);
667 }
668 EXPORT_SYMBOL(ion_unmap_kernel);
669
670 static int ion_debug_client_show(struct seq_file *s, void *unused)
671 {
672         struct ion_client *client = s->private;
673         struct rb_node *n;
674         size_t sizes[ION_NUM_HEAP_IDS] = {0};
675         const char *names[ION_NUM_HEAP_IDS] = {NULL};
676         int i;
677
678         mutex_lock(&client->lock);
679         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
680                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
681                                                      node);
682                 unsigned int id = handle->buffer->heap->id;
683
684                 if (!names[id])
685                         names[id] = handle->buffer->heap->name;
686                 sizes[id] += handle->buffer->size;
687         }
688         mutex_unlock(&client->lock);
689
690         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
691         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
692                 if (!names[i])
693                         continue;
694                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
695         }
696         return 0;
697 }
698
699 static int ion_debug_client_open(struct inode *inode, struct file *file)
700 {
701         return single_open(file, ion_debug_client_show, inode->i_private);
702 }
703
704 static const struct file_operations debug_client_fops = {
705         .open = ion_debug_client_open,
706         .read = seq_read,
707         .llseek = seq_lseek,
708         .release = single_release,
709 };
710
711 struct ion_client *ion_client_create(struct ion_device *dev,
712                                      const char *name)
713 {
714         struct ion_client *client;
715         struct task_struct *task;
716         struct rb_node **p;
717         struct rb_node *parent = NULL;
718         struct ion_client *entry;
719         char debug_name[64];
720         pid_t pid;
721
722         get_task_struct(current->group_leader);
723         task_lock(current->group_leader);
724         pid = task_pid_nr(current->group_leader);
725         /* don't bother to store task struct for kernel threads,
726            they can't be killed anyway */
727         if (current->group_leader->flags & PF_KTHREAD) {
728                 put_task_struct(current->group_leader);
729                 task = NULL;
730         } else {
731                 task = current->group_leader;
732         }
733         task_unlock(current->group_leader);
734
735         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
736         if (!client) {
737                 if (task)
738                         put_task_struct(current->group_leader);
739                 return ERR_PTR(-ENOMEM);
740         }
741
742         client->dev = dev;
743         client->handles = RB_ROOT;
744         idr_init(&client->idr);
745         mutex_init(&client->lock);
746         client->name = name;
747         client->task = task;
748         client->pid = pid;
749
750         down_write(&dev->lock);
751         p = &dev->clients.rb_node;
752         while (*p) {
753                 parent = *p;
754                 entry = rb_entry(parent, struct ion_client, node);
755
756                 if (client < entry)
757                         p = &(*p)->rb_left;
758                 else if (client > entry)
759                         p = &(*p)->rb_right;
760         }
761         rb_link_node(&client->node, parent, p);
762         rb_insert_color(&client->node, &dev->clients);
763
764         snprintf(debug_name, 64, "%u", client->pid);
765         client->debug_root = debugfs_create_file(debug_name, 0664,
766                                                  dev->debug_root, client,
767                                                  &debug_client_fops);
768         up_write(&dev->lock);
769
770         return client;
771 }
772 EXPORT_SYMBOL(ion_client_create);
773
774 void ion_client_destroy(struct ion_client *client)
775 {
776         struct ion_device *dev = client->dev;
777         struct rb_node *n;
778
779         pr_debug("%s: %d\n", __func__, __LINE__);
780         while ((n = rb_first(&client->handles))) {
781                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
782                                                      node);
783                 ion_handle_destroy(&handle->ref);
784         }
785
786         idr_destroy(&client->idr);
787
788         down_write(&dev->lock);
789         if (client->task)
790                 put_task_struct(client->task);
791         rb_erase(&client->node, &dev->clients);
792         debugfs_remove_recursive(client->debug_root);
793         up_write(&dev->lock);
794
795         kfree(client);
796 }
797 EXPORT_SYMBOL(ion_client_destroy);
798
799 struct sg_table *ion_sg_table(struct ion_client *client,
800                               struct ion_handle *handle)
801 {
802         struct ion_buffer *buffer;
803         struct sg_table *table;
804
805         mutex_lock(&client->lock);
806         if (!ion_handle_validate(client, handle)) {
807                 pr_err("%s: invalid handle passed to map_dma.\n",
808                        __func__);
809                 mutex_unlock(&client->lock);
810                 return ERR_PTR(-EINVAL);
811         }
812         buffer = handle->buffer;
813         table = buffer->sg_table;
814         mutex_unlock(&client->lock);
815         return table;
816 }
817 EXPORT_SYMBOL(ion_sg_table);
818
819 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
820                                        struct device *dev,
821                                        enum dma_data_direction direction);
822
823 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
824                                         enum dma_data_direction direction)
825 {
826         struct dma_buf *dmabuf = attachment->dmabuf;
827         struct ion_buffer *buffer = dmabuf->priv;
828
829         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
830         return buffer->sg_table;
831 }
832
833 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
834                               struct sg_table *table,
835                               enum dma_data_direction direction)
836 {
837 }
838
839 void ion_pages_sync_for_device(struct device *dev, struct page *page,
840                 size_t size, enum dma_data_direction dir)
841 {
842         struct scatterlist sg;
843
844         sg_init_table(&sg, 1);
845         sg_set_page(&sg, page, size, 0);
846         /*
847          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
848          * for the targeted device, but this works on the currently targeted
849          * hardware.
850          */
851         sg_dma_address(&sg) = page_to_phys(page);
852         dma_sync_sg_for_device(dev, &sg, 1, dir);
853 }
854
855 struct ion_vma_list {
856         struct list_head list;
857         struct vm_area_struct *vma;
858 };
859
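/*
 * For buffers that fault in their user mappings, sync every page that has
 * been dirtied by a fault and then zap the user mappings so that the next
 * CPU access faults again and re-marks its page dirty.
 */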
860 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
861                                        struct device *dev,
862                                        enum dma_data_direction dir)
863 {
864         struct ion_vma_list *vma_list;
865         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
866         int i;
867
868         pr_debug("%s: syncing for device %s\n", __func__,
869                  dev ? dev_name(dev) : "null");
870
871         if (!ion_buffer_fault_user_mappings(buffer))
872                 return;
873
874         mutex_lock(&buffer->lock);
875         for (i = 0; i < pages; i++) {
876                 struct page *page = buffer->pages[i];
877
878                 if (ion_buffer_page_is_dirty(page))
879                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
880                                                         PAGE_SIZE, dir);
881
882                 ion_buffer_page_clean(buffer->pages + i);
883         }
884         list_for_each_entry(vma_list, &buffer->vmas, list) {
885                 struct vm_area_struct *vma = vma_list->vma;
886
887                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
888                                NULL);
889         }
890         mutex_unlock(&buffer->lock);
891 }
892
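/*
 * Fault handler for cached buffers that defer their user mappings: mark
 * the faulting page dirty so it gets synced before the next device access,
 * then install the pfn directly into the user's page tables.
 */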
893 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
894 {
895         struct ion_buffer *buffer = vma->vm_private_data;
896         unsigned long pfn;
897         int ret;
898
899         mutex_lock(&buffer->lock);
900         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
901         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
902
903         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
904         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
905         mutex_unlock(&buffer->lock);
906         if (ret)
907                 return VM_FAULT_ERROR;
908
909         return VM_FAULT_NOPAGE;
910 }
911
912 static void ion_vm_open(struct vm_area_struct *vma)
913 {
914         struct ion_buffer *buffer = vma->vm_private_data;
915         struct ion_vma_list *vma_list;
916
917         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
918         if (!vma_list)
919                 return;
920         vma_list->vma = vma;
921         mutex_lock(&buffer->lock);
922         list_add(&vma_list->list, &buffer->vmas);
923         mutex_unlock(&buffer->lock);
924         pr_debug("%s: adding %p\n", __func__, vma);
925 }
926
927 static void ion_vm_close(struct vm_area_struct *vma)
928 {
929         struct ion_buffer *buffer = vma->vm_private_data;
930         struct ion_vma_list *vma_list, *tmp;
931
932         pr_debug("%s\n", __func__);
933         mutex_lock(&buffer->lock);
934         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
935                 if (vma_list->vma != vma)
936                         continue;
937                 list_del(&vma_list->list);
938                 kfree(vma_list);
939                 pr_debug("%s: deleting %p\n", __func__, vma);
940                 break;
941         }
942         mutex_unlock(&buffer->lock);
943 }
944
945 static struct vm_operations_struct ion_vma_ops = {
946         .open = ion_vm_open,
947         .close = ion_vm_close,
948         .fault = ion_vm_fault,
949 };
950
951 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
952 {
953         struct ion_buffer *buffer = dmabuf->priv;
954         int ret = 0;
955
956         if (!buffer->heap->ops->map_user) {
957                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
958                        __func__);
959                 return -EINVAL;
960         }
961
962         if (ion_buffer_fault_user_mappings(buffer)) {
963                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
964                                                         VM_DONTDUMP;
965                 vma->vm_private_data = buffer;
966                 vma->vm_ops = &ion_vma_ops;
967                 ion_vm_open(vma);
968                 return 0;
969         }
970
971         if (!(buffer->flags & ION_FLAG_CACHED))
972                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
973
974         mutex_lock(&buffer->lock);
975         /* now map it to userspace */
976         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
977         mutex_unlock(&buffer->lock);
978
979         if (ret)
980                 pr_err("%s: failure mapping buffer to userspace\n",
981                        __func__);
982
983         return ret;
984 }
985
986 static void ion_dma_buf_release(struct dma_buf *dmabuf)
987 {
988         struct ion_buffer *buffer = dmabuf->priv;
989         ion_buffer_put(buffer);
990 }
991
992 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
993 {
994         struct ion_buffer *buffer = dmabuf->priv;
995         return buffer->vaddr + offset * PAGE_SIZE;
996 }
997
998 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
999                                void *ptr)
1000 {
1001         return;
1002 }
1003
1004 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1005                                         size_t len,
1006                                         enum dma_data_direction direction)
1007 {
1008         struct ion_buffer *buffer = dmabuf->priv;
1009         void *vaddr;
1010
1011         if (!buffer->heap->ops->map_kernel) {
1012                 pr_err("%s: map kernel is not implemented by this heap.\n",
1013                        __func__);
1014                 return -ENODEV;
1015         }
1016
1017         mutex_lock(&buffer->lock);
1018         vaddr = ion_buffer_kmap_get(buffer);
1019         mutex_unlock(&buffer->lock);
1020         if (IS_ERR(vaddr))
1021                 return PTR_ERR(vaddr);
1022         return 0;
1023 }
1024
1025 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1026                                        size_t len,
1027                                        enum dma_data_direction direction)
1028 {
1029         struct ion_buffer *buffer = dmabuf->priv;
1030
1031         mutex_lock(&buffer->lock);
1032         ion_buffer_kmap_put(buffer);
1033         mutex_unlock(&buffer->lock);
1034 }
1035
1036 static struct dma_buf_ops dma_buf_ops = {
1037         .map_dma_buf = ion_map_dma_buf,
1038         .unmap_dma_buf = ion_unmap_dma_buf,
1039         .mmap = ion_mmap,
1040         .release = ion_dma_buf_release,
1041         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1042         .end_cpu_access = ion_dma_buf_end_cpu_access,
1043         .kmap_atomic = ion_dma_buf_kmap,
1044         .kunmap_atomic = ion_dma_buf_kunmap,
1045         .kmap = ion_dma_buf_kmap,
1046         .kunmap = ion_dma_buf_kunmap,
1047 };
1048
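/*
 * Export a buffer as a dma-buf.  A buffer reference is taken here and is
 * dropped either on export failure or later by ion_dma_buf_release() when
 * the dma-buf itself is released.
 */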
1049 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1050                                                 struct ion_handle *handle)
1051 {
1052         struct ion_buffer *buffer;
1053         struct dma_buf *dmabuf;
1054         bool valid_handle;
1055
1056         mutex_lock(&client->lock);
1057         valid_handle = ion_handle_validate(client, handle);
1058         if (!valid_handle) {
1059                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1060                 mutex_unlock(&client->lock);
1061                 return ERR_PTR(-EINVAL);
1062         }
1063         buffer = handle->buffer;
1064         ion_buffer_get(buffer);
1065         mutex_unlock(&client->lock);
1066
1067         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1068         if (IS_ERR(dmabuf)) {
1069                 ion_buffer_put(buffer);
1070                 return dmabuf;
1071         }
1072
1073         return dmabuf;
1074 }
1075 EXPORT_SYMBOL(ion_share_dma_buf);
1076
1077 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1078 {
1079         struct dma_buf *dmabuf;
1080         int fd;
1081
1082         dmabuf = ion_share_dma_buf(client, handle);
1083         if (IS_ERR(dmabuf))
1084                 return PTR_ERR(dmabuf);
1085
1086         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1087         if (fd < 0)
1088                 dma_buf_put(dmabuf);
1089
1090         return fd;
1091 }
1092 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1093
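/*
 * Import a dma-buf fd that was exported by ion itself (buffers from other
 * exporters are rejected).  If the client already has a handle for the
 * underlying buffer, that handle's reference count is bumped instead of
 * creating a new handle.
 */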
1094 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1095 {
1096         struct dma_buf *dmabuf;
1097         struct ion_buffer *buffer;
1098         struct ion_handle *handle;
1099         int ret;
1100
1101         dmabuf = dma_buf_get(fd);
1102         if (IS_ERR(dmabuf))
1103                 return ERR_CAST(dmabuf);
1104         /* if this memory came from ion */
1105
1106         if (dmabuf->ops != &dma_buf_ops) {
1107                 pr_err("%s: can not import dmabuf from another exporter\n",
1108                        __func__);
1109                 dma_buf_put(dmabuf);
1110                 return ERR_PTR(-EINVAL);
1111         }
1112         buffer = dmabuf->priv;
1113
1114         mutex_lock(&client->lock);
1115         /* if a handle exists for this buffer just take a reference to it */
1116         handle = ion_handle_lookup(client, buffer);
1117         if (!IS_ERR(handle)) {
1118                 ion_handle_get(handle);
1119                 mutex_unlock(&client->lock);
1120                 goto end;
1121         }
1122         mutex_unlock(&client->lock);
1123
1124         handle = ion_handle_create(client, buffer);
1125         if (IS_ERR(handle))
1126                 goto end;
1127
1128         mutex_lock(&client->lock);
1129         ret = ion_handle_add(client, handle);
1130         mutex_unlock(&client->lock);
1131         if (ret) {
1132                 ion_handle_put(handle);
1133                 handle = ERR_PTR(ret);
1134         }
1135
1136 end:
1137         dma_buf_put(dmabuf);
1138         return handle;
1139 }
1140 EXPORT_SYMBOL(ion_import_dma_buf);
1141
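/*
 * Back end for ION_IOC_SYNC: sync the buffer's whole scatterlist for the
 * device in both directions.
 */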
1142 static int ion_sync_for_device(struct ion_client *client, int fd)
1143 {
1144         struct dma_buf *dmabuf;
1145         struct ion_buffer *buffer;
1146
1147         dmabuf = dma_buf_get(fd);
1148         if (IS_ERR(dmabuf))
1149                 return PTR_ERR(dmabuf);
1150
1151         /* if this memory came from ion */
1152         if (dmabuf->ops != &dma_buf_ops) {
1153                 pr_err("%s: can not sync dmabuf from another exporter\n",
1154                        __func__);
1155                 dma_buf_put(dmabuf);
1156                 return -EINVAL;
1157         }
1158         buffer = dmabuf->priv;
1159
1160         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1161                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1162         dma_buf_put(dmabuf);
1163         return 0;
1164 }
1165
1166 /* fix up the cases where the ioctl direction bits are incorrect */
1167 static unsigned int ion_ioctl_dir(unsigned int cmd)
1168 {
1169         switch (cmd) {
1170         case ION_IOC_SYNC:
1171         case ION_IOC_FREE:
1172         case ION_IOC_CUSTOM:
1173                 return _IOC_WRITE;
1174         default:
1175                 return _IOC_DIR(cmd);
1176         }
1177 }
1178
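/*
 * The ioctl argument is copied in and out of a local union based on the
 * direction bits of the command.  For ION_IOC_ALLOC the new handle is
 * remembered in cleanup_handle so it can be freed if copying the result
 * back to userspace fails.
 */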
1179 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1180 {
1181         struct ion_client *client = filp->private_data;
1182         struct ion_device *dev = client->dev;
1183         struct ion_handle *cleanup_handle = NULL;
1184         int ret = 0;
1185         unsigned int dir;
1186
1187         union {
1188                 struct ion_fd_data fd;
1189                 struct ion_allocation_data allocation;
1190                 struct ion_handle_data handle;
1191                 struct ion_custom_data custom;
1192         } data;
1193
1194         dir = ion_ioctl_dir(cmd);
1195
1196         if (_IOC_SIZE(cmd) > sizeof(data))
1197                 return -EINVAL;
1198
1199         if (dir & _IOC_WRITE)
1200                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1201                         return -EFAULT;
1202
1203         switch (cmd) {
1204         case ION_IOC_ALLOC:
1205         {
1206                 struct ion_handle *handle;
1207
1208                 handle = ion_alloc(client, data.allocation.len,
1209                                                 data.allocation.align,
1210                                                 data.allocation.heap_id_mask,
1211                                                 data.allocation.flags);
1212                 if (IS_ERR(handle))
1213                         return PTR_ERR(handle);
1214
1215                 data.allocation.handle = handle->id;
1216
1217                 cleanup_handle = handle;
1218                 break;
1219         }
1220         case ION_IOC_FREE:
1221         {
1222                 struct ion_handle *handle;
1223
1224                 handle = ion_handle_get_by_id(client, data.handle.handle);
1225                 if (IS_ERR(handle))
1226                         return PTR_ERR(handle);
1227                 ion_free(client, handle);
1228                 ion_handle_put(handle);
1229                 break;
1230         }
1231         case ION_IOC_SHARE:
1232         case ION_IOC_MAP:
1233         {
1234                 struct ion_handle *handle;
1235
1236                 handle = ion_handle_get_by_id(client, data.handle.handle);
1237                 if (IS_ERR(handle))
1238                         return PTR_ERR(handle);
1239                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1240                 ion_handle_put(handle);
1241                 if (data.fd.fd < 0)
1242                         ret = data.fd.fd;
1243                 break;
1244         }
1245         case ION_IOC_IMPORT:
1246         {
1247                 struct ion_handle *handle;
1248                 handle = ion_import_dma_buf(client, data.fd.fd);
1249                 if (IS_ERR(handle))
1250                         ret = PTR_ERR(handle);
1251                 else
1252                         data.handle.handle = handle->id;
1253                 break;
1254         }
1255         case ION_IOC_SYNC:
1256         {
1257                 ret = ion_sync_for_device(client, data.fd.fd);
1258                 break;
1259         }
1260         case ION_IOC_CUSTOM:
1261         {
1262                 if (!dev->custom_ioctl)
1263                         return -ENOTTY;
1264                 ret = dev->custom_ioctl(client, data.custom.cmd,
1265                                                 data.custom.arg);
1266                 break;
1267         }
1268         default:
1269                 return -ENOTTY;
1270         }
1271
1272         if (dir & _IOC_READ) {
1273                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1274                         if (cleanup_handle)
1275                                 ion_free(client, cleanup_handle);
1276                         return -EFAULT;
1277                 }
1278         }
1279         return ret;
1280 }
1281
1282 static int ion_release(struct inode *inode, struct file *file)
1283 {
1284         struct ion_client *client = file->private_data;
1285
1286         pr_debug("%s: %d\n", __func__, __LINE__);
1287         ion_client_destroy(client);
1288         return 0;
1289 }
1290
1291 static int ion_open(struct inode *inode, struct file *file)
1292 {
1293         struct miscdevice *miscdev = file->private_data;
1294         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1295         struct ion_client *client;
1296
1297         pr_debug("%s: %d\n", __func__, __LINE__);
1298         client = ion_client_create(dev, "user");
1299         if (IS_ERR(client))
1300                 return PTR_ERR(client);
1301         file->private_data = client;
1302
1303         return 0;
1304 }
1305
1306 static const struct file_operations ion_fops = {
1307         .owner          = THIS_MODULE,
1308         .open           = ion_open,
1309         .release        = ion_release,
1310         .unlocked_ioctl = ion_ioctl,
1311         .compat_ioctl   = compat_ion_ioctl,
1312 };
1313
1314 static size_t ion_debug_heap_total(struct ion_client *client,
1315                                    unsigned int id)
1316 {
1317         size_t size = 0;
1318         struct rb_node *n;
1319
1320         mutex_lock(&client->lock);
1321         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1322                 struct ion_handle *handle = rb_entry(n,
1323                                                      struct ion_handle,
1324                                                      node);
1325                 if (handle->buffer->heap->id == id)
1326                         size += handle->buffer->size;
1327         }
1328         mutex_unlock(&client->lock);
1329         return size;
1330 }
1331
1332 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1333 {
1334         struct ion_heap *heap = s->private;
1335         struct ion_device *dev = heap->dev;
1336         struct rb_node *n;
1337         size_t total_size = 0;
1338         size_t total_orphaned_size = 0;
1339
1340         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1341         seq_printf(s, "----------------------------------------------------\n");
1342
1343         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1344                 struct ion_client *client = rb_entry(n, struct ion_client,
1345                                                      node);
1346                 size_t size = ion_debug_heap_total(client, heap->id);
1347                 if (!size)
1348                         continue;
1349                 if (client->task) {
1350                         char task_comm[TASK_COMM_LEN];
1351
1352                         get_task_comm(task_comm, client->task);
1353                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1354                                    client->pid, size);
1355                 } else {
1356                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1357                                    client->pid, size);
1358                 }
1359         }
1360         seq_printf(s, "----------------------------------------------------\n");
1361         seq_printf(s, "orphaned allocations (info is from last known client):\n");
1363         mutex_lock(&dev->buffer_lock);
1364         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1365                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1366                                                      node);
1367                 if (buffer->heap->id != heap->id)
1368                         continue;
1369                 total_size += buffer->size;
1370                 if (!buffer->handle_count) {
1371                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1372                                    buffer->task_comm, buffer->pid,
1373                                    buffer->size, buffer->kmap_cnt,
1374                                    atomic_read(&buffer->ref.refcount));
1375                         total_orphaned_size += buffer->size;
1376                 }
1377         }
1378         mutex_unlock(&dev->buffer_lock);
1379         seq_printf(s, "----------------------------------------------------\n");
1380         seq_printf(s, "%16s %16zu\n", "total orphaned",
1381                    total_orphaned_size);
1382         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1383         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1384                 seq_printf(s, "%16s %16zu\n", "deferred free",
1385                                 heap->free_list_size);
1386         seq_printf(s, "----------------------------------------------------\n");
1387
1388         if (heap->debug_show)
1389                 heap->debug_show(heap, s, unused);
1390
1391         return 0;
1392 }
1393
1394 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1395 {
1396         return single_open(file, ion_debug_heap_show, inode->i_private);
1397 }
1398
1399 static const struct file_operations debug_heap_fops = {
1400         .open = ion_debug_heap_open,
1401         .read = seq_read,
1402         .llseek = seq_lseek,
1403         .release = single_release,
1404 };
1405
1406 #ifdef DEBUG_HEAP_SHRINKER
1407 static int debug_shrink_set(void *data, u64 val)
1408 {
1409         struct ion_heap *heap = data;
1410         struct shrink_control sc;
1411         int objs;
1412
1413         sc.gfp_mask = -1;
1414         sc.nr_to_scan = 0;
1415
1416         if (!val)
1417                 return 0;
1418
1419         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1420         sc.nr_to_scan = objs;
1421
1422         heap->shrinker.shrink(&heap->shrinker, &sc);
1423         return 0;
1424 }
1425
1426 static int debug_shrink_get(void *data, u64 *val)
1427 {
1428         struct ion_heap *heap = data;
1429         struct shrink_control sc;
1430         int objs;
1431
1432         sc.gfp_mask = -1;
1433         sc.nr_to_scan = 0;
1434
1435         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1436         *val = objs;
1437         return 0;
1438 }
1439
1440 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1441                         debug_shrink_set, "%llu\n");
1442 #endif
1443
1444 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1445 {
1446         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1447             !heap->ops->unmap_dma)
1448                 pr_err("%s: can not add heap with invalid ops struct.\n",
1449                        __func__);
1450
1451         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1452                 ion_heap_init_deferred_free(heap);
1453
1454         heap->dev = dev;
1455         down_write(&dev->lock);
1456         /* use negative heap->id to reverse the priority -- when traversing
1457            the list later attempt higher id numbers first */
1458         plist_node_init(&heap->node, -heap->id);
1459         plist_add(&heap->node, &dev->heaps);
1460         debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1461                             &debug_heap_fops);
1462 #ifdef DEBUG_HEAP_SHRINKER
1463         if (heap->shrinker.shrink) {
1464                 char debug_name[64];
1465
1466                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1467                 debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
1468                                     &debug_shrink_fops);
1469         }
1470 #endif
1471         up_write(&dev->lock);
1472 }
1473
1474 struct ion_device *ion_device_create(long (*custom_ioctl)
1475                                      (struct ion_client *client,
1476                                       unsigned int cmd,
1477                                       unsigned long arg))
1478 {
1479         struct ion_device *idev;
1480         int ret;
1481
1482         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1483         if (!idev)
1484                 return ERR_PTR(-ENOMEM);
1485
1486         idev->dev.minor = MISC_DYNAMIC_MINOR;
1487         idev->dev.name = "ion";
1488         idev->dev.fops = &ion_fops;
1489         idev->dev.parent = NULL;
1490         ret = misc_register(&idev->dev);
1491         if (ret) {
1492                 pr_err("ion: failed to register misc device.\n");
1493                 return ERR_PTR(ret);
1494         }
1495
1496         idev->debug_root = debugfs_create_dir("ion", NULL);
1497         if (!idev->debug_root)
1498                 pr_err("ion: failed to create debug files.\n");
1499
1500         idev->custom_ioctl = custom_ioctl;
1501         idev->buffers = RB_ROOT;
1502         mutex_init(&idev->buffer_lock);
1503         init_rwsem(&idev->lock);
1504         plist_head_init(&idev->heaps);
1505         idev->clients = RB_ROOT;
1506         return idev;
1507 }
1508
1509 void ion_device_destroy(struct ion_device *dev)
1510 {
1511         misc_deregister(&dev->dev);
1512         /* XXX need to free the heaps and clients ? */
1513         kfree(dev);
1514 }
1515
1516 void __init ion_reserve(struct ion_platform_data *data)
1517 {
1518         int i;
1519
1520         for (i = 0; i < data->nr; i++) {
1521                 if (data->heaps[i].size == 0)
1522                         continue;
1523
1524                 if (data->heaps[i].base == 0) {
1525                         phys_addr_t paddr;
1526                         paddr = memblock_alloc_base(data->heaps[i].size,
1527                                                     data->heaps[i].align,
1528                                                     MEMBLOCK_ALLOC_ANYWHERE);
1529                         if (!paddr) {
1530                                 pr_err("%s: error allocating memblock for heap %d\n",
1531                                        __func__, i);
1533                                 continue;
1534                         }
1535                         data->heaps[i].base = paddr;
1536                 } else {
1537                         int ret = memblock_reserve(data->heaps[i].base,
1538                                                data->heaps[i].size);
1539                         if (ret)
1540                                 pr_err("memblock reserve of %zx@%lx failed\n",
1541                                        data->heaps[i].size,
1542                                        data->heaps[i].base);
1543                 }
1544                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1545                         data->heaps[i].name,
1546                         data->heaps[i].base,
1547                         data->heaps[i].size);
1548         }
1549 }