Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
[cascardo/linux.git] / drivers / gpu / drm / exynos / exynos_drm_g2d.c
1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/clk.h>
13 #include <linux/err.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/slab.h>
19 #include <linux/workqueue.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dma-attrs.h>
22 #include <linux/of.h>
23
24 #include <drm/drmP.h>
25 #include <drm/exynos_drm.h>
26 #include "exynos_drm_drv.h"
27 #include "exynos_drm_gem.h"
28 #include "exynos_drm_iommu.h"
29
30 #define G2D_HW_MAJOR_VER                4
31 #define G2D_HW_MINOR_VER                1
32
33 /* valid register range set from user: 0x0104 ~ 0x0880 */
34 #define G2D_VALID_START                 0x0104
35 #define G2D_VALID_END                   0x0880
36
37 /* general registers */
38 #define G2D_SOFT_RESET                  0x0000
39 #define G2D_INTEN                       0x0004
40 #define G2D_INTC_PEND                   0x000C
41 #define G2D_DMA_SFR_BASE_ADDR           0x0080
42 #define G2D_DMA_COMMAND                 0x0084
43 #define G2D_DMA_STATUS                  0x008C
44 #define G2D_DMA_HOLD_CMD                0x0090
45
46 /* command registers */
47 #define G2D_BITBLT_START                0x0100
48
49 /* registers for base address */
50 #define G2D_SRC_BASE_ADDR               0x0304
51 #define G2D_SRC_PLANE2_BASE_ADDR        0x0318
52 #define G2D_DST_BASE_ADDR               0x0404
53 #define G2D_DST_PLANE2_BASE_ADDR        0x0418
54 #define G2D_PAT_BASE_ADDR               0x0500
55 #define G2D_MSK_BASE_ADDR               0x0520
56
57 /* G2D_SOFT_RESET */
58 #define G2D_SFRCLEAR                    (1 << 1)
59 #define G2D_R                           (1 << 0)
60
61 /* G2D_INTEN */
62 #define G2D_INTEN_ACF                   (1 << 3)
63 #define G2D_INTEN_UCF                   (1 << 2)
64 #define G2D_INTEN_GCF                   (1 << 1)
65 #define G2D_INTEN_SCF                   (1 << 0)
66
67 /* G2D_INTC_PEND */
68 #define G2D_INTP_ACMD_FIN               (1 << 3)
69 #define G2D_INTP_UCMD_FIN               (1 << 2)
70 #define G2D_INTP_GCMD_FIN               (1 << 1)
71 #define G2D_INTP_SCMD_FIN               (1 << 0)
72
73 /* G2D_DMA_COMMAND */
74 #define G2D_DMA_HALT                    (1 << 2)
75 #define G2D_DMA_CONTINUE                (1 << 1)
76 #define G2D_DMA_START                   (1 << 0)
77
78 /* G2D_DMA_STATUS */
79 #define G2D_DMA_LIST_DONE_COUNT         (0xFF << 17)
80 #define G2D_DMA_BITBLT_DONE_COUNT       (0xFFFF << 1)
81 #define G2D_DMA_DONE                    (1 << 0)
82 #define G2D_DMA_LIST_DONE_COUNT_OFFSET  17
83
84 /* G2D_DMA_HOLD_CMD */
85 #define G2D_USET_HOLD                   (1 << 2)
86 #define G2D_LIST_HOLD                   (1 << 1)
87 #define G2D_BITBLT_HOLD                 (1 << 0)
88
89 /* G2D_BITBLT_START */
90 #define G2D_START_CASESEL               (1 << 2)
91 #define G2D_START_NHOLT                 (1 << 1)
92 #define G2D_START_BITBLT                (1 << 0)
93
94 #define G2D_CMDLIST_SIZE                (PAGE_SIZE / 4)
95 #define G2D_CMDLIST_NUM                 64
96 #define G2D_CMDLIST_POOL_SIZE           (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
97 #define G2D_CMDLIST_DATA_NUM            (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
98
99 #define MAX_BUF_ADDR_NR                 6
100
101 /* maximum buffer pool size of userptr is 64MB as default */
102 #define MAX_POOL                (64 * 1024 * 1024)
103
/* Kind of buffer referenced by a command-list base-address entry. */
enum {
	BUF_TYPE_GEM = 1,	/* GEM handle, resolved via exynos_drm_gem_get_dma_addr() */
	BUF_TYPE_USERPTR,	/* userspace pointer, mapped via g2d_userptr_get_dma_addr() */
};
108
109 /* cmdlist data structure */
/*
 * Layout of one command list as seen by the hardware.  data[] holds
 * (register offset, value) pairs; the slot at index 'last' is patched
 * with the DMA address of the next list when lists are chained (see
 * g2d_add_cmdlist_to_inuse()).
 */
struct g2d_cmdlist {
	u32		head;	/* NOTE(review): not referenced in this file — presumably read by hardware; confirm */
	unsigned long	data[G2D_CMDLIST_DATA_NUM];
	u32		last;	/* last data offset */
};
115
/*
 * Completion event pending delivery to userspace.  base.link sits on the
 * file's event_list until g2d_finish_event() moves it to the DRM file's
 * queue and wakes waiters.
 */
struct drm_exynos_pending_g2d_event {
	struct drm_pending_event	base;
	struct drm_exynos_g2d_event	event;
};
120
/*
 * A userspace buffer pinned and DMA-mapped for G2D access.  Cached on the
 * per-file userptr_list and looked up by (userptr, size); refcount counts
 * outstanding mappings, in_pool keeps the object cached until
 * g2d_userptr_free_all(), and out_of_list marks an object already removed
 * from the cache that must be freed when its last reference drops.
 */
struct g2d_cmdlist_userptr {
	struct list_head	list;		/* entry on g2d_priv->userptr_list */
	dma_addr_t		dma_addr;	/* device address of the first sg entry */
	unsigned long		userptr;	/* original userspace address (cache key) */
	unsigned long		size;		/* byte length (cache key) */
	struct page		**pages;	/* pinned user pages */
	unsigned int		npages;
	struct sg_table		*sgt;		/* scatter list mapped for DMA */
	struct vm_area_struct	*vma;		/* copy of the backing vma */
	atomic_t		refcount;
	bool			in_pool;	/* accounted against g2d->current_pool */
	bool			out_of_list;	/* removed from userptr_list; free on last put */
};
134
/*
 * One command list plus the bookkeeping needed to run and recycle it:
 * the map_nr buffers it references (handles[]/obj_type[]), its
 * device-visible address, and an optional completion event.
 */
struct g2d_cmdlist_node {
	struct list_head	list;		/* free_cmdlist / inuse_cmdlist / run_cmdlist */
	struct g2d_cmdlist	*cmdlist;	/* CPU view into the cmdlist pool */
	unsigned int		map_nr;		/* number of mapped buffer entries */
	unsigned long		handles[MAX_BUF_ADDR_NR];
	unsigned int		obj_type[MAX_BUF_ADDR_NR];	/* BUF_TYPE_* */
	dma_addr_t		dma_addr;	/* device view of *cmdlist */

	struct drm_exynos_pending_g2d_event	*event;
};
145
/*
 * One queued submission: the chain of command lists to execute, their
 * pending events, and the submitting file/pid.  'complete' is signalled
 * by the runqueue worker; async nodes are freed by the worker itself
 * rather than by a waiting submitter.
 */
struct g2d_runqueue_node {
	struct list_head	list;		/* entry on g2d->runqueue */
	struct list_head	run_cmdlist;	/* g2d_cmdlist_node chain to run */
	struct list_head	event_list;	/* pending completion events */
	struct drm_file		*filp;
	pid_t			pid;
	struct completion	complete;
	int			async;		/* nonzero: worker frees the node */
};
155
/* Per-device driver state. */
struct g2d_data {
	struct device			*dev;
	struct clk			*gate_clk;	/* enabled in g2d_dma_start(), disabled in the worker */
	void __iomem			*regs;
	int				irq;
	struct workqueue_struct		*g2d_workq;
	struct work_struct		runqueue_work;	/* runs g2d_runqueue_worker() after completion IRQ */
	struct exynos_drm_subdrv	subdrv;
	bool				suspended;	/* stops the worker from starting new nodes */

	/* cmdlist */
	struct g2d_cmdlist_node		*cmdlist_node;	/* node array backing free_cmdlist */
	struct list_head		free_cmdlist;
	struct mutex			cmdlist_mutex;	/* protects free_cmdlist */
	dma_addr_t			cmdlist_pool;	/* device address of the cmdlist pool */
	void				*cmdlist_pool_virt;
	struct dma_attrs		cmdlist_dma_attrs;

	/* runqueue */
	struct g2d_runqueue_node	*runqueue_node;	/* node currently on the hardware */
	struct list_head		runqueue;
	struct mutex			runqueue_mutex;
	struct kmem_cache		*runqueue_slab;

	unsigned long			current_pool;	/* bytes of pooled userptr mappings */
	unsigned long			max_pool;	/* pool limit (MAX_POOL) */
};
183
184 static int g2d_init_cmdlist(struct g2d_data *g2d)
185 {
186         struct device *dev = g2d->dev;
187         struct g2d_cmdlist_node *node = g2d->cmdlist_node;
188         struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
189         int nr;
190         int ret;
191
192         init_dma_attrs(&g2d->cmdlist_dma_attrs);
193         dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
194
195         g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
196                                                 G2D_CMDLIST_POOL_SIZE,
197                                                 &g2d->cmdlist_pool, GFP_KERNEL,
198                                                 &g2d->cmdlist_dma_attrs);
199         if (!g2d->cmdlist_pool_virt) {
200                 dev_err(dev, "failed to allocate dma memory\n");
201                 return -ENOMEM;
202         }
203
204         node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
205         if (!node) {
206                 dev_err(dev, "failed to allocate memory\n");
207                 ret = -ENOMEM;
208                 goto err;
209         }
210
211         for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
212                 node[nr].cmdlist =
213                         g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
214                 node[nr].dma_addr =
215                         g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
216
217                 list_add_tail(&node[nr].list, &g2d->free_cmdlist);
218         }
219
220         return 0;
221
222 err:
223         dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
224                         g2d->cmdlist_pool_virt,
225                         g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
226         return ret;
227 }
228
229 static void g2d_fini_cmdlist(struct g2d_data *g2d)
230 {
231         struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
232
233         kfree(g2d->cmdlist_node);
234         dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
235                         g2d->cmdlist_pool_virt,
236                         g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
237 }
238
239 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
240 {
241         struct device *dev = g2d->dev;
242         struct g2d_cmdlist_node *node;
243
244         mutex_lock(&g2d->cmdlist_mutex);
245         if (list_empty(&g2d->free_cmdlist)) {
246                 dev_err(dev, "there is no free cmdlist\n");
247                 mutex_unlock(&g2d->cmdlist_mutex);
248                 return NULL;
249         }
250
251         node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
252                                 list);
253         list_del_init(&node->list);
254         mutex_unlock(&g2d->cmdlist_mutex);
255
256         return node;
257 }
258
/* Return a command list node to the free pool (serialized by cmdlist_mutex). */
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
	mutex_lock(&g2d->cmdlist_mutex);
	list_move_tail(&node->list, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);
}
265
266 static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
267                                      struct g2d_cmdlist_node *node)
268 {
269         struct g2d_cmdlist_node *lnode;
270
271         if (list_empty(&g2d_priv->inuse_cmdlist))
272                 goto add_to_list;
273
274         /* this links to base address of new cmdlist */
275         lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
276                                 struct g2d_cmdlist_node, list);
277         lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
278
279 add_to_list:
280         list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
281
282         if (node->event)
283                 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
284 }
285
286 static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
287                                         unsigned long obj,
288                                         bool force)
289 {
290         struct g2d_cmdlist_userptr *g2d_userptr =
291                                         (struct g2d_cmdlist_userptr *)obj;
292
293         if (!obj)
294                 return;
295
296         if (force)
297                 goto out;
298
299         atomic_dec(&g2d_userptr->refcount);
300
301         if (atomic_read(&g2d_userptr->refcount) > 0)
302                 return;
303
304         if (g2d_userptr->in_pool)
305                 return;
306
307 out:
308         exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
309                                         DMA_BIDIRECTIONAL);
310
311         exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
312                                         g2d_userptr->npages,
313                                         g2d_userptr->vma);
314
315         if (!g2d_userptr->out_of_list)
316                 list_del_init(&g2d_userptr->list);
317
318         sg_free_table(g2d_userptr->sgt);
319         kfree(g2d_userptr->sgt);
320         g2d_userptr->sgt = NULL;
321
322         kfree(g2d_userptr->pages);
323         g2d_userptr->pages = NULL;
324         kfree(g2d_userptr);
325         g2d_userptr = NULL;
326 }
327
328 static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
329                                         unsigned long userptr,
330                                         unsigned long size,
331                                         struct drm_file *filp,
332                                         unsigned long *obj)
333 {
334         struct drm_exynos_file_private *file_priv = filp->driver_priv;
335         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
336         struct g2d_cmdlist_userptr *g2d_userptr;
337         struct g2d_data *g2d;
338         struct page **pages;
339         struct sg_table *sgt;
340         struct vm_area_struct *vma;
341         unsigned long start, end;
342         unsigned int npages, offset;
343         int ret;
344
345         if (!size) {
346                 DRM_ERROR("invalid userptr size.\n");
347                 return ERR_PTR(-EINVAL);
348         }
349
350         g2d = dev_get_drvdata(g2d_priv->dev);
351
352         /* check if userptr already exists in userptr_list. */
353         list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
354                 if (g2d_userptr->userptr == userptr) {
355                         /*
356                          * also check size because there could be same address
357                          * and different size.
358                          */
359                         if (g2d_userptr->size == size) {
360                                 atomic_inc(&g2d_userptr->refcount);
361                                 *obj = (unsigned long)g2d_userptr;
362
363                                 return &g2d_userptr->dma_addr;
364                         }
365
366                         /*
367                          * at this moment, maybe g2d dma is accessing this
368                          * g2d_userptr memory region so just remove this
369                          * g2d_userptr object from userptr_list not to be
370                          * referred again and also except it the userptr
371                          * pool to be released after the dma access completion.
372                          */
373                         g2d_userptr->out_of_list = true;
374                         g2d_userptr->in_pool = false;
375                         list_del_init(&g2d_userptr->list);
376
377                         break;
378                 }
379         }
380
381         g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
382         if (!g2d_userptr) {
383                 DRM_ERROR("failed to allocate g2d_userptr.\n");
384                 return ERR_PTR(-ENOMEM);
385         }
386
387         atomic_set(&g2d_userptr->refcount, 1);
388
389         start = userptr & PAGE_MASK;
390         offset = userptr & ~PAGE_MASK;
391         end = PAGE_ALIGN(userptr + size);
392         npages = (end - start) >> PAGE_SHIFT;
393         g2d_userptr->npages = npages;
394
395         pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
396         if (!pages) {
397                 DRM_ERROR("failed to allocate pages.\n");
398                 kfree(g2d_userptr);
399                 return ERR_PTR(-ENOMEM);
400         }
401
402         vma = find_vma(current->mm, userptr);
403         if (!vma) {
404                 DRM_ERROR("failed to get vm region.\n");
405                 ret = -EFAULT;
406                 goto err_free_pages;
407         }
408
409         if (vma->vm_end < userptr + size) {
410                 DRM_ERROR("vma is too small.\n");
411                 ret = -EFAULT;
412                 goto err_free_pages;
413         }
414
415         g2d_userptr->vma = exynos_gem_get_vma(vma);
416         if (!g2d_userptr->vma) {
417                 DRM_ERROR("failed to copy vma.\n");
418                 ret = -ENOMEM;
419                 goto err_free_pages;
420         }
421
422         g2d_userptr->size = size;
423
424         ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
425                                                 npages, pages, vma);
426         if (ret < 0) {
427                 DRM_ERROR("failed to get user pages from userptr.\n");
428                 goto err_put_vma;
429         }
430
431         g2d_userptr->pages = pages;
432
433         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
434         if (!sgt) {
435                 DRM_ERROR("failed to allocate sg table.\n");
436                 ret = -ENOMEM;
437                 goto err_free_userptr;
438         }
439
440         ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
441                                         size, GFP_KERNEL);
442         if (ret < 0) {
443                 DRM_ERROR("failed to get sgt from pages.\n");
444                 goto err_free_sgt;
445         }
446
447         g2d_userptr->sgt = sgt;
448
449         ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
450                                                 DMA_BIDIRECTIONAL);
451         if (ret < 0) {
452                 DRM_ERROR("failed to map sgt with dma region.\n");
453                 goto err_free_sgt;
454         }
455
456         g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
457         g2d_userptr->userptr = userptr;
458
459         list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
460
461         if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
462                 g2d->current_pool += npages << PAGE_SHIFT;
463                 g2d_userptr->in_pool = true;
464         }
465
466         *obj = (unsigned long)g2d_userptr;
467
468         return &g2d_userptr->dma_addr;
469
470 err_free_sgt:
471         sg_free_table(sgt);
472         kfree(sgt);
473         sgt = NULL;
474
475 err_free_userptr:
476         exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
477                                         g2d_userptr->npages,
478                                         g2d_userptr->vma);
479
480 err_put_vma:
481         exynos_gem_put_vma(g2d_userptr->vma);
482
483 err_free_pages:
484         kfree(pages);
485         kfree(g2d_userptr);
486         pages = NULL;
487         g2d_userptr = NULL;
488
489         return ERR_PTR(ret);
490 }
491
/*
 * Release every pooled userptr mapping still cached for @filp and reset
 * the pool accounting.  force=true makes g2d_userptr_put_dma_addr()
 * skip the refcount/pool checks and tear the mapping down immediately.
 */
static void g2d_userptr_free_all(struct drm_device *drm_dev,
					struct g2d_data *g2d,
					struct drm_file *filp)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_cmdlist_userptr *g2d_userptr, *n;

	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
		if (g2d_userptr->in_pool)
			g2d_userptr_put_dma_addr(drm_dev,
						(unsigned long)g2d_userptr,
						true);

	g2d->current_pool = 0;
}
508
509 static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
510                                 struct g2d_cmdlist_node *node,
511                                 struct drm_device *drm_dev,
512                                 struct drm_file *file)
513 {
514         struct g2d_cmdlist *cmdlist = node->cmdlist;
515         int offset;
516         int i;
517
518         for (i = 0; i < node->map_nr; i++) {
519                 unsigned long handle;
520                 dma_addr_t *addr;
521
522                 offset = cmdlist->last - (i * 2 + 1);
523                 handle = cmdlist->data[offset];
524
525                 if (node->obj_type[i] == BUF_TYPE_GEM) {
526                         addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
527                                                                 file);
528                         if (IS_ERR(addr)) {
529                                 node->map_nr = i;
530                                 return -EFAULT;
531                         }
532                 } else {
533                         struct drm_exynos_g2d_userptr g2d_userptr;
534
535                         if (copy_from_user(&g2d_userptr, (void __user *)handle,
536                                 sizeof(struct drm_exynos_g2d_userptr))) {
537                                 node->map_nr = i;
538                                 return -EFAULT;
539                         }
540
541                         addr = g2d_userptr_get_dma_addr(drm_dev,
542                                                         g2d_userptr.userptr,
543                                                         g2d_userptr.size,
544                                                         file,
545                                                         &handle);
546                         if (IS_ERR(addr)) {
547                                 node->map_nr = i;
548                                 return -EFAULT;
549                         }
550                 }
551
552                 cmdlist->data[offset] = *addr;
553                 node->handles[i] = handle;
554         }
555
556         return 0;
557 }
558
/*
 * Release every buffer mapped by g2d_map_cmdlist_gem() for @node: GEM
 * handles are unpinned, userptr objects drop one reference (force=false).
 * Clears handles[] and map_nr so the node can be recycled.
 */
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
				  struct g2d_cmdlist_node *node,
				  struct drm_file *filp)
{
	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
	int i;

	for (i = 0; i < node->map_nr; i++) {
		unsigned long handle = node->handles[i];

		if (node->obj_type[i] == BUF_TYPE_GEM)
			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
							filp);
		else
			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
							false);

		node->handles[i] = 0;
	}

	node->map_nr = 0;
}
581
/*
 * Start hardware execution of @runqueue_node's first command list.
 * Takes a runtime-PM reference and gates the clock on (both released in
 * g2d_runqueue_worker()), unmasks the completion interrupts, programs
 * the list base address, then kicks the command DMA.
 */
static void g2d_dma_start(struct g2d_data *g2d,
			  struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node =
				list_first_entry(&runqueue_node->run_cmdlist,
						struct g2d_cmdlist_node, list);

	pm_runtime_get_sync(g2d->dev);
	clk_enable(g2d->gate_clk);

	/* interrupt enable */
	writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
			g2d->regs + G2D_INTEN);

	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
599
600 static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
601 {
602         struct g2d_runqueue_node *runqueue_node;
603
604         if (list_empty(&g2d->runqueue))
605                 return NULL;
606
607         runqueue_node = list_first_entry(&g2d->runqueue,
608                                          struct g2d_runqueue_node, list);
609         list_del_init(&runqueue_node->list);
610         return runqueue_node;
611 }
612
/*
 * Tear down a finished runqueue node: unmap the buffers of every command
 * list it ran, return the lists to the free pool and free the node.
 * Safe to call with NULL.
 */
static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node;

	if (!runqueue_node)
		return;

	mutex_lock(&g2d->cmdlist_mutex);
	/*
	 * commands in run_cmdlist have been completed so unmap all gem
	 * objects in each command node so that they are unreferenced.
	 */
	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
633
634 static void g2d_exec_runqueue(struct g2d_data *g2d)
635 {
636         g2d->runqueue_node = g2d_get_runqueue_node(g2d);
637         if (g2d->runqueue_node)
638                 g2d_dma_start(g2d, g2d->runqueue_node);
639 }
640
/*
 * Work item queued from g2d_irq_handler() when the hardware reports full
 * command completion (G2D_INTP_ACMD_FIN).  Drops the clock and
 * runtime-PM references taken in g2d_dma_start(), signals the finished
 * node's completion (and frees it here for async submissions), then
 * starts the next queued node unless the device is marked suspended.
 * All under runqueue_mutex.
 */
static void g2d_runqueue_worker(struct work_struct *work)
{
	struct g2d_data *g2d = container_of(work, struct g2d_data,
					    runqueue_work);


	mutex_lock(&g2d->runqueue_mutex);
	clk_disable(g2d->gate_clk);
	pm_runtime_put_sync(g2d->dev);

	complete(&g2d->runqueue_node->complete);
	if (g2d->runqueue_node->async)
		g2d_free_runqueue_node(g2d, g2d->runqueue_node);

	if (g2d->suspended)
		g2d->runqueue_node = NULL;
	else
		g2d_exec_runqueue(g2d);
	mutex_unlock(&g2d->runqueue_mutex);
}
661
/*
 * Deliver the oldest pending completion event of the currently running
 * node: timestamp it, record the hardware list-done count @cmdlist_no,
 * move it onto the owning DRM file's event queue and wake any waiters.
 * No-op when the node has no pending events.
 */
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
	struct drm_device *drm_dev = g2d->subdrv.drm_dev;
	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
	struct drm_exynos_pending_g2d_event *e;
	struct timeval now;
	unsigned long flags;

	if (list_empty(&runqueue_node->event_list))
		return;

	e = list_first_entry(&runqueue_node->event_list,
			     struct drm_exynos_pending_g2d_event, base.link);

	do_gettimeofday(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.cmdlist_no = cmdlist_no;

	/* event_lock protects the DRM file's event list */
	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
686
/*
 * Interrupt handler.  Acknowledges all pending bits in G2D_INTC_PEND.
 * On a per-list completion (G2D_INTP_GCMD_FIN) it reads the list-done
 * count from G2D_DMA_STATUS, delivers the matching event and, unless
 * the whole job also finished, clears the DMA hold and lets the engine
 * continue with the next list.  On full completion (G2D_INTP_ACMD_FIN)
 * the runqueue worker is scheduled to wrap up and start the next node.
 */
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
	struct g2d_data *g2d = dev_id;
	u32 pending;

	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
	if (pending)
		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);

	if (pending & G2D_INTP_GCMD_FIN) {
		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);

		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
						G2D_DMA_LIST_DONE_COUNT_OFFSET;

		g2d_finish_event(g2d, cmdlist_no);

		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
		if (!(pending & G2D_INTP_ACMD_FIN)) {
			writel_relaxed(G2D_DMA_CONTINUE,
					g2d->regs + G2D_DMA_COMMAND);
		}
	}

	if (pending & G2D_INTP_ACMD_FIN)
		queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	return IRQ_HANDLED;
}
716
/*
 * Validate the register offsets userspace wrote into a command list.
 *
 * Walks @nr (offset, value) pairs backwards from cmdlist->last.  Every
 * offset must lie within [G2D_VALID_START, G2D_VALID_END] and be 4-byte
 * aligned.  When @for_addr is true the entries must be buffer
 * base-address registers: bit 31 of the offset word flags a userptr
 * buffer (G2D_BUF_USERPTR) and is cleared after recording the type in
 * node->obj_type[]; all other entries default to BUF_TYPE_GEM.  When
 * @for_addr is false, base-address registers are rejected instead.
 *
 * Returns 0 if all offsets are acceptable, -EINVAL otherwise.
 */
static int g2d_check_reg_offset(struct device *dev,
				struct g2d_cmdlist_node *node,
				int nr, bool for_addr)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	int reg_offset;
	int index;
	int i;

	for (i = 0; i < nr; i++) {
		index = cmdlist->last - 2 * (i + 1);

		if (for_addr) {
			/* check userptr buffer type. */
			reg_offset = (cmdlist->data[index] &
					~0x7fffffff) >> 31;
			if (reg_offset) {
				node->obj_type[i] = BUF_TYPE_USERPTR;
				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
			}
		}

		reg_offset = cmdlist->data[index] & ~0xfffff000;

		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
			goto err;
		if (reg_offset % 4)
			goto err;

		switch (reg_offset) {
		case G2D_SRC_BASE_ADDR:
		case G2D_SRC_PLANE2_BASE_ADDR:
		case G2D_DST_BASE_ADDR:
		case G2D_DST_PLANE2_BASE_ADDR:
		case G2D_PAT_BASE_ADDR:
		case G2D_MSK_BASE_ADDR:
			if (!for_addr)
				goto err;

			/* keep a userptr type detected above */
			if (node->obj_type[i] != BUF_TYPE_USERPTR)
				node->obj_type[i] = BUF_TYPE_GEM;
			break;
		default:
			if (for_addr)
				goto err;
			break;
		}
	}

	return 0;

err:
	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
	return -EINVAL;
}
772
773 /* ioctl functions */
774 int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
775                              struct drm_file *file)
776 {
777         struct drm_exynos_g2d_get_ver *ver = data;
778
779         ver->major = G2D_HW_MAJOR_VER;
780         ver->minor = G2D_HW_MINOR_VER;
781
782         return 0;
783 }
784 EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
785
786 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
787                                  struct drm_file *file)
788 {
789         struct drm_exynos_file_private *file_priv = file->driver_priv;
790         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
791         struct device *dev = g2d_priv->dev;
792         struct g2d_data *g2d;
793         struct drm_exynos_g2d_set_cmdlist *req = data;
794         struct drm_exynos_g2d_cmd *cmd;
795         struct drm_exynos_pending_g2d_event *e;
796         struct g2d_cmdlist_node *node;
797         struct g2d_cmdlist *cmdlist;
798         unsigned long flags;
799         int size;
800         int ret;
801
802         if (!dev)
803                 return -ENODEV;
804
805         g2d = dev_get_drvdata(dev);
806         if (!g2d)
807                 return -EFAULT;
808
809         node = g2d_get_cmdlist(g2d);
810         if (!node)
811                 return -ENOMEM;
812
813         node->event = NULL;
814
815         if (req->event_type != G2D_EVENT_NOT) {
816                 spin_lock_irqsave(&drm_dev->event_lock, flags);
817                 if (file->event_space < sizeof(e->event)) {
818                         spin_unlock_irqrestore(&drm_dev->event_lock, flags);
819                         ret = -ENOMEM;
820                         goto err;
821                 }
822                 file->event_space -= sizeof(e->event);
823                 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
824
825                 e = kzalloc(sizeof(*node->event), GFP_KERNEL);
826                 if (!e) {
827                         dev_err(dev, "failed to allocate event\n");
828
829                         spin_lock_irqsave(&drm_dev->event_lock, flags);
830                         file->event_space += sizeof(e->event);
831                         spin_unlock_irqrestore(&drm_dev->event_lock, flags);
832
833                         ret = -ENOMEM;
834                         goto err;
835                 }
836
837                 e->event.base.type = DRM_EXYNOS_G2D_EVENT;
838                 e->event.base.length = sizeof(e->event);
839                 e->event.user_data = req->user_data;
840                 e->base.event = &e->event.base;
841                 e->base.file_priv = file;
842                 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
843
844                 node->event = e;
845         }
846
847         cmdlist = node->cmdlist;
848
849         cmdlist->last = 0;
850
851         /*
852          * If don't clear SFR registers, the cmdlist is affected by register
853          * values of previous cmdlist. G2D hw executes SFR clear command and
854          * a next command at the same time then the next command is ignored and
855          * is executed rightly from next next command, so needs a dummy command
856          * to next command of SFR clear command.
857          */
858         cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
859         cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
860         cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
861         cmdlist->data[cmdlist->last++] = 0;
862
863         if (node->event) {
864                 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
865                 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
866         }
867
868         /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
869         size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
870         if (size > G2D_CMDLIST_DATA_NUM) {
871                 dev_err(dev, "cmdlist size is too big\n");
872                 ret = -EINVAL;
873                 goto err_free_event;
874         }
875
876         cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
877
878         if (copy_from_user(cmdlist->data + cmdlist->last,
879                                 (void __user *)cmd,
880                                 sizeof(*cmd) * req->cmd_nr)) {
881                 ret = -EFAULT;
882                 goto err_free_event;
883         }
884         cmdlist->last += req->cmd_nr * 2;
885
886         ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
887         if (ret < 0)
888                 goto err_free_event;
889
890         node->map_nr = req->cmd_buf_nr;
891         if (req->cmd_buf_nr) {
892                 struct drm_exynos_g2d_cmd *cmd_buf;
893
894                 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
895
896                 if (copy_from_user(cmdlist->data + cmdlist->last,
897                                         (void __user *)cmd_buf,
898                                         sizeof(*cmd_buf) * req->cmd_buf_nr)) {
899                         ret = -EFAULT;
900                         goto err_free_event;
901                 }
902                 cmdlist->last += req->cmd_buf_nr * 2;
903
904                 ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
905                 if (ret < 0)
906                         goto err_free_event;
907
908                 ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
909                 if (ret < 0)
910                         goto err_unmap;
911         }
912
913         cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
914         cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
915
916         /* head */
917         cmdlist->head = cmdlist->last / 2;
918
919         /* tail */
920         cmdlist->data[cmdlist->last] = 0;
921
922         g2d_add_cmdlist_to_inuse(g2d_priv, node);
923
924         return 0;
925
926 err_unmap:
927         g2d_unmap_cmdlist_gem(g2d, node, file);
928 err_free_event:
929         if (node->event) {
930                 spin_lock_irqsave(&drm_dev->event_lock, flags);
931                 file->event_space += sizeof(e->event);
932                 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
933                 kfree(node->event);
934         }
935 err:
936         g2d_put_cmdlist(g2d, node);
937         return ret;
938 }
939 EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
940
/*
 * exynos_g2d_exec_ioctl - DRM_IOCTL_EXYNOS_G2D_EXEC handler.
 *
 * Collects every command list this file queued via the set_cmdlist ioctl
 * into a freshly allocated runqueue node and appends the node to the global
 * G2D runqueue, kicking off execution if no node is currently running.
 * For synchronous requests (req->async == 0) the call blocks until the
 * hardware completes the node and frees it here; asynchronous nodes are
 * released later by the runqueue worker.
 *
 * Returns 0 on success or a negative error code.
 */
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
			  struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct device *dev = g2d_priv->dev;
	struct g2d_data *g2d;
	struct drm_exynos_g2d_exec *req = data;
	struct g2d_runqueue_node *runqueue_node;
	struct list_head *run_cmdlist;
	struct list_head *event_list;

	if (!dev)
		return -ENODEV;

	g2d = dev_get_drvdata(dev);
	if (!g2d)
		return -EFAULT;

	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
	if (!runqueue_node) {
		dev_err(dev, "failed to allocate memory\n");
		return -ENOMEM;
	}
	run_cmdlist = &runqueue_node->run_cmdlist;
	event_list = &runqueue_node->event_list;
	INIT_LIST_HEAD(run_cmdlist);
	INIT_LIST_HEAD(event_list);
	init_completion(&runqueue_node->complete);
	runqueue_node->async = req->async;

	/* take over everything this client queued since its last exec call */
	list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
	list_splice_init(&g2d_priv->event_list, event_list);

	if (list_empty(run_cmdlist)) {
		dev_err(dev, "there is no inuse cmdlist\n");
		kmem_cache_free(g2d->runqueue_slab, runqueue_node);
		return -EPERM;
	}

	mutex_lock(&g2d->runqueue_mutex);
	runqueue_node->pid = current->pid;
	runqueue_node->filp = file;
	list_add_tail(&runqueue_node->list, &g2d->runqueue);
	/* start the engine only if it is idle; otherwise the worker will
	 * pick this node up when the current one completes. */
	if (!g2d->runqueue_node)
		g2d_exec_runqueue(g2d);
	mutex_unlock(&g2d->runqueue_mutex);

	if (runqueue_node->async)
		goto out;

	/* synchronous path: wait for the HW and reclaim the node here */
	wait_for_completion(&runqueue_node->complete);
	g2d_free_runqueue_node(g2d, runqueue_node);

out:
	return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
999
1000 static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1001 {
1002         struct g2d_data *g2d;
1003         int ret;
1004
1005         g2d = dev_get_drvdata(dev);
1006         if (!g2d)
1007                 return -EFAULT;
1008
1009         /* allocate dma-aware cmdlist buffer. */
1010         ret = g2d_init_cmdlist(g2d);
1011         if (ret < 0) {
1012                 dev_err(dev, "cmdlist init failed\n");
1013                 return ret;
1014         }
1015
1016         if (!is_drm_iommu_supported(drm_dev))
1017                 return 0;
1018
1019         ret = drm_iommu_attach_device(drm_dev, dev);
1020         if (ret < 0) {
1021                 dev_err(dev, "failed to enable iommu.\n");
1022                 g2d_fini_cmdlist(g2d);
1023         }
1024
1025         return ret;
1026
1027 }
1028
/* Per-DRM-device teardown: detach from the IOMMU if one is in use. */
static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	if (is_drm_iommu_supported(drm_dev))
		drm_iommu_detach_device(drm_dev, dev);
}
1036
1037 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
1038                         struct drm_file *file)
1039 {
1040         struct drm_exynos_file_private *file_priv = file->driver_priv;
1041         struct exynos_drm_g2d_private *g2d_priv;
1042
1043         g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
1044         if (!g2d_priv) {
1045                 dev_err(dev, "failed to allocate g2d private data\n");
1046                 return -ENOMEM;
1047         }
1048
1049         g2d_priv->dev = dev;
1050         file_priv->g2d_priv = g2d_priv;
1051
1052         INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
1053         INIT_LIST_HEAD(&g2d_priv->event_list);
1054         INIT_LIST_HEAD(&g2d_priv->userptr_list);
1055
1056         return 0;
1057 }
1058
/*
 * g2d_close - per-file release hook for the G2D sub-driver.
 *
 * Called when a DRM file handle is closed.  Any command lists the client
 * queued but never executed are unmapped and returned to the driver's
 * free list, all userptr mappings belonging to the file are released,
 * and the per-client private data is freed.
 */
static void g2d_close(struct drm_device *drm_dev, struct device *dev,
			struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_data *g2d;
	struct g2d_cmdlist_node *node, *n;

	if (!dev)
		return;

	g2d = dev_get_drvdata(dev);
	if (!g2d)
		return;

	mutex_lock(&g2d->cmdlist_mutex);
	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
		/*
		 * unmap all gem objects not completed.
		 *
		 * P.S. if current process was terminated forcely then
		 * there may be some commands in inuse_cmdlist so unmap
		 * them.
		 */
		g2d_unmap_cmdlist_gem(g2d, node, file);
		list_move_tail(&node->list, &g2d->free_cmdlist);
	}
	mutex_unlock(&g2d->cmdlist_mutex);

	/* release all g2d_userptr in pool. */
	g2d_userptr_free_all(drm_dev, g2d, file);

	kfree(file_priv->g2d_priv);
}
1093
1094 static int g2d_probe(struct platform_device *pdev)
1095 {
1096         struct device *dev = &pdev->dev;
1097         struct resource *res;
1098         struct g2d_data *g2d;
1099         struct exynos_drm_subdrv *subdrv;
1100         int ret;
1101
1102         g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
1103         if (!g2d) {
1104                 dev_err(dev, "failed to allocate driver data\n");
1105                 return -ENOMEM;
1106         }
1107
1108         g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
1109                         sizeof(struct g2d_runqueue_node), 0, 0, NULL);
1110         if (!g2d->runqueue_slab)
1111                 return -ENOMEM;
1112
1113         g2d->dev = dev;
1114
1115         g2d->g2d_workq = create_singlethread_workqueue("g2d");
1116         if (!g2d->g2d_workq) {
1117                 dev_err(dev, "failed to create workqueue\n");
1118                 ret = -EINVAL;
1119                 goto err_destroy_slab;
1120         }
1121
1122         INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
1123         INIT_LIST_HEAD(&g2d->free_cmdlist);
1124         INIT_LIST_HEAD(&g2d->runqueue);
1125
1126         mutex_init(&g2d->cmdlist_mutex);
1127         mutex_init(&g2d->runqueue_mutex);
1128
1129         g2d->gate_clk = devm_clk_get(dev, "fimg2d");
1130         if (IS_ERR(g2d->gate_clk)) {
1131                 dev_err(dev, "failed to get gate clock\n");
1132                 ret = PTR_ERR(g2d->gate_clk);
1133                 goto err_destroy_workqueue;
1134         }
1135
1136         pm_runtime_enable(dev);
1137
1138         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1139
1140         g2d->regs = devm_ioremap_resource(&pdev->dev, res);
1141         if (IS_ERR(g2d->regs)) {
1142                 ret = PTR_ERR(g2d->regs);
1143                 goto err_put_clk;
1144         }
1145
1146         g2d->irq = platform_get_irq(pdev, 0);
1147         if (g2d->irq < 0) {
1148                 dev_err(dev, "failed to get irq\n");
1149                 ret = g2d->irq;
1150                 goto err_put_clk;
1151         }
1152
1153         ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
1154                                                                 "drm_g2d", g2d);
1155         if (ret < 0) {
1156                 dev_err(dev, "irq request failed\n");
1157                 goto err_put_clk;
1158         }
1159
1160         g2d->max_pool = MAX_POOL;
1161
1162         platform_set_drvdata(pdev, g2d);
1163
1164         subdrv = &g2d->subdrv;
1165         subdrv->dev = dev;
1166         subdrv->probe = g2d_subdrv_probe;
1167         subdrv->remove = g2d_subdrv_remove;
1168         subdrv->open = g2d_open;
1169         subdrv->close = g2d_close;
1170
1171         ret = exynos_drm_subdrv_register(subdrv);
1172         if (ret < 0) {
1173                 dev_err(dev, "failed to register drm g2d device\n");
1174                 goto err_put_clk;
1175         }
1176
1177         dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
1178                         G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
1179
1180         return 0;
1181
1182 err_put_clk:
1183         pm_runtime_disable(dev);
1184 err_destroy_workqueue:
1185         destroy_workqueue(g2d->g2d_workq);
1186 err_destroy_slab:
1187         kmem_cache_destroy(g2d->runqueue_slab);
1188         return ret;
1189 }
1190
/*
 * g2d_remove - unbind the G2D platform device.
 *
 * Stops the runqueue worker, unregisters the DRM sub-driver, drains any
 * runqueue nodes still pending, and releases the remaining driver-owned
 * resources (cmdlist buffer, workqueue, slab cache).  Always returns 0.
 */
static int g2d_remove(struct platform_device *pdev)
{
	struct g2d_data *g2d = platform_get_drvdata(pdev);

	cancel_work_sync(&g2d->runqueue_work);
	exynos_drm_subdrv_unregister(&g2d->subdrv);

	/* drain the runqueue: free the current node, then pull the next */
	while (g2d->runqueue_node) {
		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
	}

	pm_runtime_disable(&pdev->dev);

	g2d_fini_cmdlist(g2d);
	destroy_workqueue(g2d->g2d_workq);
	kmem_cache_destroy(g2d->runqueue_slab);

	return 0;
}
1211
#ifdef CONFIG_PM_SLEEP
/*
 * g2d_suspend - system-sleep suspend callback.
 *
 * Sets the suspended flag under runqueue_mutex so no new node is started,
 * then polls (with short sleeps) until the node currently on the hardware
 * has been retired, and finally flushes the runqueue worker.
 */
static int g2d_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	mutex_lock(&g2d->runqueue_mutex);
	g2d->suspended = true;
	mutex_unlock(&g2d->runqueue_mutex);

	/* busy-wait until the in-flight node is cleared by the worker */
	while (g2d->runqueue_node)
		/* FIXME: good range? */
		usleep_range(500, 1000);

	flush_work(&g2d->runqueue_work);

	return 0;
}

/*
 * g2d_resume - system-sleep resume callback.
 *
 * Clears the suspended flag and restarts execution of any command lists
 * that were queued while the device was asleep.
 */
static int g2d_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	g2d->suspended = false;
	g2d_exec_runqueue(g2d);

	return 0;
}
#endif
1240
1241 static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
1242
#ifdef CONFIG_OF
/* Device-tree compatibles this driver binds to. */
static const struct of_device_id exynos_g2d_match[] = {
	{ .compatible = "samsung,exynos5250-g2d" },
	{},
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
#endif
1250
/* Platform driver glue; matched by name or via the OF match table. */
struct platform_driver g2d_driver = {
	.probe		= g2d_probe,
	.remove		= g2d_remove,
	.driver		= {
		.name	= "s5p-g2d",
		.owner	= THIS_MODULE,
		.pm	= &g2d_pm_ops,
		.of_match_table = of_match_ptr(exynos_g2d_match),
	},
};