/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
/* DRM driver identification reported to userspace via drm_version(). */
#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DATE	"20110530"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0
41 struct exynos_atomic_commit {
42 struct work_struct work;
43 struct drm_device *dev;
44 struct drm_atomic_state *state;
48 static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
50 struct drm_crtc_state *crtc_state;
51 struct drm_crtc *crtc;
54 for_each_crtc_in_state(state, crtc, crtc_state, i) {
55 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
57 if (!crtc->state->enable)
60 ret = drm_crtc_vblank_get(crtc);
64 exynos_drm_crtc_wait_pending_update(exynos_crtc);
65 drm_crtc_vblank_put(crtc);
69 static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
71 struct drm_device *dev = commit->dev;
72 struct exynos_drm_private *priv = dev->dev_private;
73 struct drm_atomic_state *state = commit->state;
74 struct drm_plane *plane;
75 struct drm_crtc *crtc;
76 struct drm_plane_state *plane_state;
77 struct drm_crtc_state *crtc_state;
80 drm_atomic_helper_commit_modeset_disables(dev, state);
82 drm_atomic_helper_commit_modeset_enables(dev, state);
85 * Exynos can't update planes with CRTCs and encoders disabled,
86 * its updates routines, specially for FIMD, requires the clocks
87 * to be enabled. So it is necessary to handle the modeset operations
88 * *before* the commit_planes() step, this way it will always
89 * have the relevant clocks enabled to perform the update.
92 for_each_crtc_in_state(state, crtc, crtc_state, i) {
93 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
95 atomic_set(&exynos_crtc->pending_update, 0);
98 for_each_plane_in_state(state, plane, plane_state, i) {
99 struct exynos_drm_crtc *exynos_crtc =
100 to_exynos_crtc(plane->crtc);
105 atomic_inc(&exynos_crtc->pending_update);
108 drm_atomic_helper_commit_planes(dev, state, false);
110 exynos_atomic_wait_for_commit(state);
112 drm_atomic_helper_cleanup_planes(dev, state);
114 drm_atomic_state_free(state);
116 spin_lock(&priv->lock);
117 priv->pending &= ~commit->crtcs;
118 spin_unlock(&priv->lock);
120 wake_up_all(&priv->wait);
125 static void exynos_drm_atomic_work(struct work_struct *work)
127 struct exynos_atomic_commit *commit = container_of(work,
128 struct exynos_atomic_commit, work);
130 exynos_atomic_commit_complete(commit);
/* Defined below; needed by exynos_drm_load(). */
static struct device *exynos_drm_get_dma_device(void);
135 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
137 struct exynos_drm_private *private;
138 struct drm_encoder *encoder;
139 unsigned int clone_mask;
142 private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
146 init_waitqueue_head(&private->wait);
147 spin_lock_init(&private->lock);
149 dev_set_drvdata(dev->dev, dev);
150 dev->dev_private = (void *)private;
152 /* the first real CRTC device is used for all dma mapping operations */
153 private->dma_dev = exynos_drm_get_dma_device();
154 if (!private->dma_dev) {
155 DRM_ERROR("no device found for DMA mapping operations.\n");
157 goto err_free_private;
159 DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
160 dev_name(private->dma_dev));
162 /* create common IOMMU mapping for all devices attached to Exynos DRM */
163 ret = drm_create_iommu_mapping(dev);
165 DRM_ERROR("failed to create iommu mapping.\n");
166 goto err_free_private;
169 drm_mode_config_init(dev);
171 exynos_drm_mode_config_init(dev);
173 /* setup possible_clones. */
176 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
177 clone_mask |= (1 << (cnt++));
179 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
180 encoder->possible_clones = clone_mask;
182 platform_set_drvdata(dev->platformdev, dev);
184 /* Try to bind all sub drivers. */
185 ret = component_bind_all(dev->dev, dev);
187 goto err_mode_config_cleanup;
189 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
193 /* Probe non kms sub drivers and virtual display driver. */
194 ret = exynos_drm_device_subdrv_probe(dev);
196 goto err_cleanup_vblank;
198 drm_mode_config_reset(dev);
201 * enable drm irq mode.
202 * - with irq_enabled = true, we can use the vblank feature.
204 * P.S. note that we wouldn't use drm irq handler but
205 * just specific driver own one instead because
206 * drm framework supports only one irq handler.
208 dev->irq_enabled = true;
210 /* init kms poll for handling hpd */
211 drm_kms_helper_poll_init(dev);
213 /* force connectors detection */
214 drm_helper_hpd_irq_event(dev);
219 drm_vblank_cleanup(dev);
221 component_unbind_all(dev->dev, dev);
222 err_mode_config_cleanup:
223 drm_mode_config_cleanup(dev);
224 drm_release_iommu_mapping(dev);
231 static int exynos_drm_unload(struct drm_device *dev)
233 exynos_drm_device_subdrv_remove(dev);
235 exynos_drm_fbdev_fini(dev);
236 drm_kms_helper_poll_fini(dev);
238 drm_vblank_cleanup(dev);
239 component_unbind_all(dev->dev, dev);
240 drm_mode_config_cleanup(dev);
241 drm_release_iommu_mapping(dev);
243 kfree(dev->dev_private);
244 dev->dev_private = NULL;
249 static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
253 spin_lock(&priv->lock);
254 pending = priv->pending & crtcs;
255 spin_unlock(&priv->lock);
260 int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
263 struct exynos_drm_private *priv = dev->dev_private;
264 struct exynos_atomic_commit *commit;
265 struct drm_crtc *crtc;
266 struct drm_crtc_state *crtc_state;
269 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
273 ret = drm_atomic_helper_prepare_planes(dev, state);
279 /* This is the point of no return */
281 INIT_WORK(&commit->work, exynos_drm_atomic_work);
283 commit->state = state;
285 /* Wait until all affected CRTCs have completed previous commits and
286 * mark them as pending.
288 for_each_crtc_in_state(state, crtc, crtc_state, i)
289 commit->crtcs |= drm_crtc_mask(crtc);
291 wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
293 spin_lock(&priv->lock);
294 priv->pending |= commit->crtcs;
295 spin_unlock(&priv->lock);
297 drm_atomic_helper_swap_state(state, true);
300 schedule_work(&commit->work);
302 exynos_atomic_commit_complete(commit);
307 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
309 struct drm_exynos_file_private *file_priv;
312 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
316 file->driver_priv = file_priv;
318 ret = exynos_drm_subdrv_open(dev, file);
320 goto err_file_priv_free;
326 file->driver_priv = NULL;
330 static void exynos_drm_preclose(struct drm_device *dev,
331 struct drm_file *file)
333 struct drm_crtc *crtc;
335 exynos_drm_subdrv_close(dev, file);
337 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
338 exynos_drm_crtc_cancel_page_flip(crtc, file);
341 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
343 kfree(file->driver_priv);
344 file->driver_priv = NULL;
/* DRM .lastclose callback: restore the fbdev console mode. */
static void exynos_drm_lastclose(struct drm_device *dev)
{
	exynos_drm_fbdev_restore_mode(dev);
}
352 static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
353 .fault = exynos_drm_gem_fault,
354 .open = drm_gem_vm_open,
355 .close = drm_gem_vm_close,
358 static const struct drm_ioctl_desc exynos_ioctls[] = {
359 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
360 DRM_AUTH | DRM_RENDER_ALLOW),
361 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
362 DRM_AUTH | DRM_RENDER_ALLOW),
363 DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
365 DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
367 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
368 DRM_AUTH | DRM_RENDER_ALLOW),
369 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
370 DRM_AUTH | DRM_RENDER_ALLOW),
371 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
372 DRM_AUTH | DRM_RENDER_ALLOW),
373 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
374 DRM_AUTH | DRM_RENDER_ALLOW),
375 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
376 DRM_AUTH | DRM_RENDER_ALLOW),
377 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
378 DRM_AUTH | DRM_RENDER_ALLOW),
379 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
380 DRM_AUTH | DRM_RENDER_ALLOW),
383 static const struct file_operations exynos_drm_driver_fops = {
384 .owner = THIS_MODULE,
386 .mmap = exynos_drm_gem_mmap,
389 .unlocked_ioctl = drm_ioctl,
391 .compat_ioctl = drm_compat_ioctl,
393 .release = drm_release,
396 static struct drm_driver exynos_drm_driver = {
397 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
398 | DRIVER_ATOMIC | DRIVER_RENDER,
399 .load = exynos_drm_load,
400 .unload = exynos_drm_unload,
401 .open = exynos_drm_open,
402 .preclose = exynos_drm_preclose,
403 .lastclose = exynos_drm_lastclose,
404 .postclose = exynos_drm_postclose,
405 .get_vblank_counter = drm_vblank_no_hw_counter,
406 .enable_vblank = exynos_drm_crtc_enable_vblank,
407 .disable_vblank = exynos_drm_crtc_disable_vblank,
408 .gem_free_object_unlocked = exynos_drm_gem_free_object,
409 .gem_vm_ops = &exynos_drm_gem_vm_ops,
410 .dumb_create = exynos_drm_gem_dumb_create,
411 .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
412 .dumb_destroy = drm_gem_dumb_destroy,
413 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
414 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
415 .gem_prime_export = drm_gem_prime_export,
416 .gem_prime_import = drm_gem_prime_import,
417 .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
418 .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
419 .gem_prime_vmap = exynos_drm_gem_prime_vmap,
420 .gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
421 .gem_prime_mmap = exynos_drm_gem_prime_mmap,
422 .ioctls = exynos_ioctls,
423 .num_ioctls = ARRAY_SIZE(exynos_ioctls),
424 .fops = &exynos_drm_driver_fops,
428 .major = DRIVER_MAJOR,
429 .minor = DRIVER_MINOR,
#ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: force every connector to DPMS_OFF while keeping
 * connector->dpms at its pre-suspend value so resume can restore it.
 * No-op if the device is already runtime-suspended or not yet bound.
 */
static int exynos_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		int old_dpms = connector->dpms;

		if (connector->funcs->dpms)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);

		/* Set the old mode back to the connector for resume */
		connector->dpms = old_dpms;
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}

/*
 * System-sleep resume: re-apply the DPMS state each connector had
 * before suspend (stashed in connector->dpms by exynos_drm_suspend()).
 */
static int exynos_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		if (connector->funcs->dpms) {
			int dpms = connector->dpms;

			/* Flip to OFF first so the dpms call sees a change. */
			connector->dpms = DRM_MODE_DPMS_OFF;
			connector->funcs->dpms(connector, dpms);
		}
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}
#endif
479 static const struct dev_pm_ops exynos_drm_pm_ops = {
480 SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
483 /* forward declaration */
484 static struct platform_driver exynos_drm_platform_driver;
/*
 * Entry in the sub-driver table: the platform driver plus flags
 * describing how it participates in device setup.
 */
struct exynos_drm_driver_info {
	struct platform_driver *driver;
	unsigned int flags;
};

#define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
#define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
#define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */

/* NULL when the corresponding Kconfig option is disabled. */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
498 * Connector drivers should not be placed before associated crtc drivers,
499 * because connector requires pipe number of its crtc during initialization.
501 static struct exynos_drm_driver_info exynos_drm_drivers[] = {
503 DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
504 DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
506 DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
507 DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
509 DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
510 DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
512 DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
513 DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
515 DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
518 DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
521 DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
524 DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
527 DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
528 DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
530 DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
532 DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
534 DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
536 DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
538 DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
541 &exynos_drm_platform_driver,
/* component_match comparator: match by struct device pointer identity. */
static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}
551 static struct component_match *exynos_drm_match_add(struct device *dev)
553 struct component_match *match = NULL;
556 for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
557 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
558 struct device *p = NULL, *d;
560 if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
563 while ((d = bus_find_device(&platform_bus_type, p,
564 &info->driver->driver,
565 (void *)platform_bus_type.match))) {
567 component_match_add(dev, &match, compare_dev, d);
573 return match ?: ERR_PTR(-ENODEV);
576 static int exynos_drm_bind(struct device *dev)
578 return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
/* Component master unbind: tear down the DRM device. */
static void exynos_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}
586 static const struct component_master_ops exynos_drm_ops = {
587 .bind = exynos_drm_bind,
588 .unbind = exynos_drm_unbind,
591 static int exynos_drm_platform_probe(struct platform_device *pdev)
593 struct component_match *match;
595 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
596 exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
598 match = exynos_drm_match_add(&pdev->dev);
600 return PTR_ERR(match);
602 return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
606 static int exynos_drm_platform_remove(struct platform_device *pdev)
608 component_master_del(&pdev->dev, &exynos_drm_ops);
612 static struct platform_driver exynos_drm_platform_driver = {
613 .probe = exynos_drm_platform_probe,
614 .remove = exynos_drm_platform_remove,
616 .name = "exynos-drm",
617 .pm = &exynos_drm_pm_ops,
621 static struct device *exynos_drm_get_dma_device(void)
625 for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
626 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
629 if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
632 while ((dev = bus_find_device(&platform_bus_type, NULL,
633 &info->driver->driver,
634 (void *)platform_bus_type.match))) {
642 static void exynos_drm_unregister_devices(void)
646 for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
647 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
650 if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
653 while ((dev = bus_find_device(&platform_bus_type, NULL,
654 &info->driver->driver,
655 (void *)platform_bus_type.match))) {
657 platform_device_unregister(to_platform_device(dev));
662 static int exynos_drm_register_devices(void)
664 struct platform_device *pdev;
667 for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
668 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
670 if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
673 pdev = platform_device_register_simple(
674 info->driver->driver.name, -1, NULL, 0);
681 exynos_drm_unregister_devices();
682 return PTR_ERR(pdev);
685 static void exynos_drm_unregister_drivers(void)
689 for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
690 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
695 platform_driver_unregister(info->driver);
699 static int exynos_drm_register_drivers(void)
703 for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
704 struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
709 ret = platform_driver_register(info->driver);
715 exynos_drm_unregister_drivers();
/*
 * Module init: create the virtual platform devices, then register the
 * sub-driver platform drivers; unwind the devices on driver failure.
 */
static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_drivers();
	if (ret)
		goto err_unregister_pdevs;

	return 0;

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}
/* Module exit: tear down drivers then virtual devices (reverse of init). */
static void exynos_drm_exit(void)
{
	exynos_drm_unregister_drivers();
	exynos_drm_unregister_devices();
}
745 module_init(exynos_drm_init);
746 module_exit(exynos_drm_exit);
748 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
749 MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
750 MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
751 MODULE_DESCRIPTION("Samsung SoC DRM Driver");
752 MODULE_LICENSE("GPL");