/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
17 #include <linux/component.h>
18 #include <linux/of_platform.h>
20 #include "etnaviv_drv.h"
21 #include "etnaviv_gpu.h"
22 #include "etnaviv_gem.h"
23 #include "etnaviv_mmu.h"
24 #include "etnaviv_gem.h"
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
/*
 * When the register-logging Kconfig option is enabled, "reglog" can be
 * toggled at runtime (module parameter, mode 0600) to trace every MMIO
 * read/write done through etnaviv_readl()/etnaviv_writel().
 */
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
/* Compiled out: make the flag a constant so the if (reglog) tests fold away. */
#define reglog 0
#endif
34 void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
41 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
43 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
45 ptr = devm_ioremap_resource(&pdev->dev, res);
47 dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
53 dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
54 dbgname, ptr, (size_t)resource_size(res));
59 void etnaviv_writel(u32 data, void __iomem *addr)
62 printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
67 u32 etnaviv_readl(const void __iomem *addr)
69 u32 val = readl(addr);
72 printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
82 static void load_gpu(struct drm_device *dev)
84 struct etnaviv_drm_private *priv = dev->dev_private;
87 for (i = 0; i < ETNA_MAX_PIPES; i++) {
88 struct etnaviv_gpu *g = priv->gpu[i];
93 ret = etnaviv_gpu_init(g);
100 static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
102 struct etnaviv_file_private *ctx;
104 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
108 file->driver_priv = ctx;
113 static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
115 struct etnaviv_drm_private *priv = dev->dev_private;
116 struct etnaviv_file_private *ctx = file->driver_priv;
119 for (i = 0; i < ETNA_MAX_PIPES; i++) {
120 struct etnaviv_gpu *gpu = priv->gpu[i];
123 mutex_lock(&gpu->lock);
124 if (gpu->lastctx == ctx)
126 mutex_unlock(&gpu->lock);
137 #ifdef CONFIG_DEBUG_FS
138 static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
140 struct etnaviv_drm_private *priv = dev->dev_private;
142 etnaviv_gem_describe_objects(priv, m);
147 static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
151 read_lock(&dev->vma_offset_manager->vm_lock);
152 ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
153 read_unlock(&dev->vma_offset_manager->vm_lock);
158 static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
160 seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
162 mutex_lock(&gpu->mmu->lock);
163 drm_mm_dump_table(m, &gpu->mmu->mm);
164 mutex_unlock(&gpu->mmu->lock);
169 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
171 struct etnaviv_cmdbuf *buf = gpu->buffer;
172 u32 size = buf->size;
173 u32 *ptr = buf->vaddr;
176 seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
177 buf->vaddr, (u64)buf->paddr, size - buf->user_size);
179 for (i = 0; i < size / 4; i++) {
183 seq_printf(m, "\t0x%p: ", ptr + i);
184 seq_printf(m, "%08x ", *(ptr + i));
189 static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
191 seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
193 mutex_lock(&gpu->lock);
194 etnaviv_buffer_dump(gpu, m);
195 mutex_unlock(&gpu->lock);
200 static int show_unlocked(struct seq_file *m, void *arg)
202 struct drm_info_node *node = (struct drm_info_node *) m->private;
203 struct drm_device *dev = node->minor->dev;
204 int (*show)(struct drm_device *dev, struct seq_file *m) =
205 node->info_ent->data;
210 static int show_each_gpu(struct seq_file *m, void *arg)
212 struct drm_info_node *node = (struct drm_info_node *) m->private;
213 struct drm_device *dev = node->minor->dev;
214 struct etnaviv_drm_private *priv = dev->dev_private;
215 struct etnaviv_gpu *gpu;
216 int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
217 node->info_ent->data;
221 for (i = 0; i < ETNA_MAX_PIPES; i++) {
234 static struct drm_info_list etnaviv_debugfs_list[] = {
235 {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
236 {"gem", show_unlocked, 0, etnaviv_gem_show},
237 { "mm", show_unlocked, 0, etnaviv_mm_show },
238 {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
239 {"ring", show_each_gpu, 0, etnaviv_ring_show},
242 static int etnaviv_debugfs_init(struct drm_minor *minor)
244 struct drm_device *dev = minor->dev;
247 ret = drm_debugfs_create_files(etnaviv_debugfs_list,
248 ARRAY_SIZE(etnaviv_debugfs_list),
249 minor->debugfs_root, minor);
252 dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
259 static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
261 drm_debugfs_remove_files(etnaviv_debugfs_list,
262 ARRAY_SIZE(etnaviv_debugfs_list), minor);
270 static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
271 struct drm_file *file)
273 struct etnaviv_drm_private *priv = dev->dev_private;
274 struct drm_etnaviv_param *args = data;
275 struct etnaviv_gpu *gpu;
277 if (args->pipe >= ETNA_MAX_PIPES)
280 gpu = priv->gpu[args->pipe];
284 return etnaviv_gpu_get_param(gpu, args->param, &args->value);
287 static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
288 struct drm_file *file)
290 struct drm_etnaviv_gem_new *args = data;
292 if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
296 return etnaviv_gem_new_handle(dev, file, args->size,
297 args->flags, &args->handle);
/*
 * Convert a drm_etnaviv_timespec (u64 fields from userspace) into a
 * kernel struct timespec compound literal, so wait helpers can take a
 * pointer to it in-place.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
305 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
306 struct drm_file *file)
308 struct drm_etnaviv_gem_cpu_prep *args = data;
309 struct drm_gem_object *obj;
312 if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
315 obj = drm_gem_object_lookup(file, args->handle);
319 ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
321 drm_gem_object_unreference_unlocked(obj);
326 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
327 struct drm_file *file)
329 struct drm_etnaviv_gem_cpu_fini *args = data;
330 struct drm_gem_object *obj;
336 obj = drm_gem_object_lookup(file, args->handle);
340 ret = etnaviv_gem_cpu_fini(obj);
342 drm_gem_object_unreference_unlocked(obj);
347 static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
348 struct drm_file *file)
350 struct drm_etnaviv_gem_info *args = data;
351 struct drm_gem_object *obj;
357 obj = drm_gem_object_lookup(file, args->handle);
361 ret = etnaviv_gem_mmap_offset(obj, &args->offset);
362 drm_gem_object_unreference_unlocked(obj);
367 static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
368 struct drm_file *file)
370 struct drm_etnaviv_wait_fence *args = data;
371 struct etnaviv_drm_private *priv = dev->dev_private;
372 struct timespec *timeout = &TS(args->timeout);
373 struct etnaviv_gpu *gpu;
375 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
378 if (args->pipe >= ETNA_MAX_PIPES)
381 gpu = priv->gpu[args->pipe];
385 if (args->flags & ETNA_WAIT_NONBLOCK)
388 return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
392 static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
393 struct drm_file *file)
395 struct drm_etnaviv_gem_userptr *args = data;
398 if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
402 if (offset_in_page(args->user_ptr | args->user_size) ||
403 (uintptr_t)args->user_ptr != args->user_ptr ||
404 (u32)args->user_size != args->user_size ||
405 args->user_ptr & ~PAGE_MASK)
408 if (args->flags & ETNA_USERPTR_WRITE)
409 access = VERIFY_WRITE;
411 access = VERIFY_READ;
413 if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
417 return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
418 args->user_size, args->flags,
422 static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
423 struct drm_file *file)
425 struct etnaviv_drm_private *priv = dev->dev_private;
426 struct drm_etnaviv_gem_wait *args = data;
427 struct timespec *timeout = &TS(args->timeout);
428 struct drm_gem_object *obj;
429 struct etnaviv_gpu *gpu;
432 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
435 if (args->pipe >= ETNA_MAX_PIPES)
438 gpu = priv->gpu[args->pipe];
442 obj = drm_gem_object_lookup(file, args->handle);
446 if (args->flags & ETNA_WAIT_NONBLOCK)
449 ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
451 drm_gem_object_unreference_unlocked(obj);
456 static const struct drm_ioctl_desc etnaviv_ioctls[] = {
457 #define ETNA_IOCTL(n, func, flags) \
458 DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
459 ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
460 ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
461 ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
462 ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
463 ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
464 ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
465 ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
466 ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
467 ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
470 static const struct vm_operations_struct vm_ops = {
471 .fault = etnaviv_gem_fault,
472 .open = drm_gem_vm_open,
473 .close = drm_gem_vm_close,
476 static const struct file_operations fops = {
477 .owner = THIS_MODULE,
479 .release = drm_release,
480 .unlocked_ioctl = drm_ioctl,
482 .compat_ioctl = drm_compat_ioctl,
487 .mmap = etnaviv_gem_mmap,
490 static struct drm_driver etnaviv_drm_driver = {
491 .driver_features = DRIVER_HAVE_IRQ |
495 .open = etnaviv_open,
496 .preclose = etnaviv_preclose,
497 .gem_free_object_unlocked = etnaviv_gem_free_object,
498 .gem_vm_ops = &vm_ops,
499 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
500 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
501 .gem_prime_export = drm_gem_prime_export,
502 .gem_prime_import = drm_gem_prime_import,
503 .gem_prime_pin = etnaviv_gem_prime_pin,
504 .gem_prime_unpin = etnaviv_gem_prime_unpin,
505 .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
506 .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
507 .gem_prime_vmap = etnaviv_gem_prime_vmap,
508 .gem_prime_vunmap = etnaviv_gem_prime_vunmap,
509 #ifdef CONFIG_DEBUG_FS
510 .debugfs_init = etnaviv_debugfs_init,
511 .debugfs_cleanup = etnaviv_debugfs_cleanup,
513 .ioctls = etnaviv_ioctls,
514 .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
517 .desc = "etnaviv DRM",
526 static int etnaviv_bind(struct device *dev)
528 struct etnaviv_drm_private *priv;
529 struct drm_device *drm;
532 drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
536 drm->platformdev = to_platform_device(dev);
538 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
540 dev_err(dev, "failed to allocate private data\n");
544 drm->dev_private = priv;
546 priv->wq = alloc_ordered_workqueue("etnaviv", 0);
552 mutex_init(&priv->gem_lock);
553 INIT_LIST_HEAD(&priv->gem_list);
556 dev_set_drvdata(dev, drm);
558 ret = component_bind_all(dev, drm);
564 ret = drm_dev_register(drm, 0);
571 component_unbind_all(dev, drm);
573 flush_workqueue(priv->wq);
574 destroy_workqueue(priv->wq);
583 static void etnaviv_unbind(struct device *dev)
585 struct drm_device *drm = dev_get_drvdata(dev);
586 struct etnaviv_drm_private *priv = drm->dev_private;
588 drm_dev_unregister(drm);
590 flush_workqueue(priv->wq);
591 destroy_workqueue(priv->wq);
593 component_unbind_all(dev, drm);
595 drm->dev_private = NULL;
601 static const struct component_master_ops etnaviv_master_ops = {
602 .bind = etnaviv_bind,
603 .unbind = etnaviv_unbind,
606 static int compare_of(struct device *dev, void *data)
608 struct device_node *np = data;
610 return dev->of_node == np;
/* Component match: compare a device against a device-name string. */
static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}
618 static int etnaviv_pdev_probe(struct platform_device *pdev)
620 struct device *dev = &pdev->dev;
621 struct device_node *node = dev->of_node;
622 struct component_match *match = NULL;
624 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
627 struct device_node *core_node;
631 core_node = of_parse_phandle(node, "cores", i);
635 component_match_add(&pdev->dev, &match, compare_of,
637 of_node_put(core_node);
639 } else if (dev->platform_data) {
640 char **names = dev->platform_data;
643 for (i = 0; names[i]; i++)
644 component_match_add(dev, &match, compare_str, names[i]);
647 return component_master_add_with_match(dev, &etnaviv_master_ops, match);
650 static int etnaviv_pdev_remove(struct platform_device *pdev)
652 component_master_del(&pdev->dev, &etnaviv_master_ops);
657 static const struct of_device_id dt_match[] = {
658 { .compatible = "fsl,imx-gpu-subsystem" },
659 { .compatible = "marvell,dove-gpu-subsystem" },
662 MODULE_DEVICE_TABLE(of, dt_match);
664 static struct platform_driver etnaviv_platform_driver = {
665 .probe = etnaviv_pdev_probe,
666 .remove = etnaviv_pdev_remove,
669 .of_match_table = dt_match,
673 static int __init etnaviv_init(void)
677 etnaviv_validate_init();
679 ret = platform_driver_register(&etnaviv_gpu_driver);
683 ret = platform_driver_register(&etnaviv_platform_driver);
685 platform_driver_unregister(&etnaviv_gpu_driver);
689 module_init(etnaviv_init);
691 static void __exit etnaviv_exit(void)
693 platform_driver_unregister(&etnaviv_gpu_driver);
694 platform_driver_unregister(&etnaviv_platform_driver);
696 module_exit(etnaviv_exit);
698 MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
699 MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
700 MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
701 MODULE_DESCRIPTION("etnaviv DRM Driver");
702 MODULE_LICENSE("GPL v2");
703 MODULE_ALIAS("platform:etnaviv");