#include "exynos_drm_gem.h"
#include "exynos_trace.h"
-#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
- drm_crtc)
-
-/*
- * Exynos specific crtc structure.
- *
- * @drm_crtc: crtc object.
- * @overlay: contain information common to display controller and hdmi and
- * contents of this overlay object would be copied to sub driver size.
- * @pipe: a crtc index created at load() with a new crtc object creation
- * and the crtc object would be set to private->crtc array
- * to get a crtc object corresponding to this pipe from private->crtc
- * array when irq interrupt occured. the reason of using this pipe is that
- * drm framework doesn't support multiple irq yet.
- * we can refer to the crtc to current hardware interrupt occured through
- * this pipe value.
- * @dpms: store the crtc dpms value
- */
-struct exynos_drm_crtc {
- struct drm_crtc drm_crtc;
- struct exynos_drm_overlay overlay;
- unsigned int pipe;
- unsigned int dpms;
-};
-
+/*
+ * NOTE(review): this function is declared void yet the visible body ends
+ * with "return 0;", which will not compile, and the declared exynos_crtc
+ * local is unused here. The body looks truncated in this hunk — confirm
+ * against the full file before merging.
+ */
static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
return 0;
}
+/*
+ * exynos_drm_crtc_page_flip_apply - program @fb's buffers into the crtc.
+ *
+ * Copies the dma address and kernel virtual address of every buffer
+ * backing @fb into the crtc's overlay, then pushes the overlay to the
+ * encoder(s) via the page_flip callback and commits the change.
+ *
+ * NOTE(review): the buffer returned by exynos_drm_fb_buffer() is
+ * dereferenced without a NULL check — confirm it cannot fail for a
+ * framebuffer that reached page flip.
+ */
+static void exynos_drm_crtc_page_flip_apply(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct exynos_drm_overlay *overlay = &exynos_crtc->overlay;
+ int nr = exynos_drm_format_num_buffers(fb->pixel_format);
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct exynos_drm_gem_buf *buffer;
+
+ buffer = exynos_drm_fb_buffer(fb, i);
+ overlay->dma_addr[i] = buffer->dma_addr;
+ overlay->vaddr[i] = buffer->kvaddr;
+
+ DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->vaddr[i],
+ (unsigned long)overlay->dma_addr[i]);
+ }
+
+ exynos_drm_fn_encoder(crtc, overlay,
+ exynos_drm_encoder_crtc_page_flip);
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
+}
+
static int exynos_drm_crtc_update(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc;
if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
int mode = DRM_MODE_DPMS_ON;
+ /*
+ * TODO(seanpaul): This has the nasty habit of calling the
+ * underlying dpms/power callbacks twice on boot. This code
+ * needs to be cleaned up so this doesn't happen.
+ */
+
/*
* enable hardware(power on) to all encoders hdmi connected
* to current crtc.
};
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
-#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
-/*
- * exynos specific framebuffer structure.
- *
- * @fb: drm framebuffer obejct.
- * @exynos_gem_obj: array of exynos specific gem object containing a gem object.
-*/
-struct exynos_drm_fb {
- struct drm_framebuffer fb;
- struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
-};
-
+/*
+ * exynos_drm_kds_callback - KDS "buffers ready" callback for a page flip.
+ *
+ * @callback_parameter: the drm_framebuffer being flipped to (new contract;
+ * the old code passed the crtc here).
+ * @callback_extra_parameter: pointer to the crtc's future_kds slot that
+ * was handed to kds_async_waitall() in exynos_drm_crtc_page_flip().
+ *
+ * Programs the new buffer into the hardware, then moves the kds resource
+ * set from the "future" slot into pending_kds under event_lock so that
+ * exynos_drm_crtc_finish_pageflip() can promote/release it at vblank.
+ */
void exynos_drm_kds_callback(void *callback_parameter, void *callback_extra_parameter)
{
- struct drm_crtc *crtc = (struct drm_crtc *)callback_parameter;
+ struct drm_framebuffer *fb = callback_parameter;
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ struct drm_crtc *crtc = exynos_fb->crtc;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = callback_extra_parameter;
- struct drm_framebuffer *old_fb = crtc->fb;
- int ret = -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
-
- crtc->fb = fb;
- ret = exynos_drm_crtc_update(crtc);
- if (ret) {
- crtc->fb = old_fb;
- mutex_unlock(&dev->struct_mutex);
- goto callback_out;
- }
- /*
- * the values related to a buffer of the drm framebuffer
- * to be applied should be set at here. because these values
- * first, are set to shadow registers and then to
- * real registers at vsync front porch period.
- */
- exynos_drm_crtc_apply(crtc);
-
- mutex_unlock(&dev->struct_mutex);
-callback_out:
- exynos_drm_wait_for_vsync(dev);
- if (dev_priv->old_kds_res_set != NULL) {
- kds_resource_set_release(&dev_priv->old_kds_res_set);
- dev_priv->old_kds_res_set = NULL;
- }
- if (dev_priv->old_dma_buf != NULL) {
- dma_buf_put(dev_priv->old_dma_buf);
- dev_priv->old_dma_buf = NULL;
+ struct kds_resource_set **pkds = callback_extra_parameter;
+ struct kds_resource_set *prev_kds;
+ unsigned long flags;
+
+ exynos_drm_crtc_page_flip_apply(crtc, fb);
+
+ /* Hand the just-acquired kds set over to pending_kds and clear the
+ * caller's slot, all under event_lock. */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ prev_kds = exynos_crtc->pending_kds;
+ exynos_crtc->pending_kds = *pkds;
+ *pkds = NULL;
+ if (prev_kds)
+ exynos_crtc->flip_in_flight--;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* A non-NULL prev_kds means finish_pageflip never consumed the
+ * previous flip's resources; release them and account the lost flip.
+ * Otherwise this is the normal path: arm flip_pending for the next
+ * vblank (it must not already be set). */
+ if (prev_kds) {
+ DRM_ERROR("previous work detected\n");
+ kds_resource_set_release(&prev_kds);
+ } else {
+ BUG_ON(atomic_read(&exynos_crtc->flip_pending));
+ atomic_set(&exynos_crtc->flip_pending, 1);
}
}
#endif
+/*
+ * exynos_drm_crtc_flip_complete - deliver a flip-done vblank event to
+ * userspace (timestamped now, sequence forced to 0).
+ *
+ * Caller must hold dev->event_lock: both call sites take it around this
+ * call, and the file_priv event list is touched here without other
+ * protection.
+ */
+static void exynos_drm_crtc_flip_complete(struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_exynos_fake_flip_complete(e->pipe);
+}
+
+/*
+ * exynos_drm_crtc_page_flip - queue a page flip to @fb on @crtc.
+ *
+ * Requires a vblank event (@event) from userspace. Holds a vblank
+ * reference for the duration of the flip, allows at most two flips in
+ * flight (-EBUSY beyond that), and defers the actual hardware update to
+ * exynos_drm_kds_callback() once the buffer's KDS resources are ready.
+ * Returns 0 on success or a negative error code.
+ */
static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
{
struct drm_device *dev = crtc->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- int ret = -EINVAL;
+ unsigned long flags;
+ int ret;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
struct exynos_drm_gem_obj *gem_ob = (struct exynos_drm_gem_obj *)exynos_fb->exynos_gem_obj[0];
+ struct kds_resource_set **pkds;
+ struct drm_pending_vblank_event *event_to_send;
#endif
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* Record both request and complete of page flip within the function
- * since this implementation is blocking in exynos_drm_crtc_update.
+ /* msb: The event flag is optional but exynos does not support it. */
+ if (!event) {
+ DRM_ERROR("called page_flip with empty event flag\n");
+ return -EINVAL;
+ }
+
+ /*
+ * the pipe from user always is 0 so we can set pipe number
+ * of current owner to event.
*/
- trace_exynos_flip_request(exynos_crtc->pipe);
+ event->pipe = exynos_crtc->pipe;
+
+ ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ if (ret) {
+ DRM_ERROR("Unable to get vblank\n");
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (exynos_crtc->flip_in_flight > 1) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ ret = -EBUSY;
+ goto fail_max_in_flight;
+ }
+ /* Signal previous flip event. Or if none in flight signal current. */
+ if (exynos_crtc->flip_in_flight) {
+ event_to_send = exynos_crtc->event;
+ exynos_crtc->event = event;
+ } else {
+ event_to_send = event;
+ exynos_crtc->event = NULL;
+ }
+ /* Claim a free kds slot (future_kds, else future_kds_extra) while
+ * still under event_lock; the ERR_PTR sentinel marks it taken. */
+ pkds = &exynos_crtc->future_kds;
+ if (*pkds)
+ pkds = &exynos_crtc->future_kds_extra;
+ *pkds = ERR_PTR(-EINVAL); /* Make it non-NULL */
+ exynos_crtc->flip_in_flight++;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+#endif
+
+ mutex_lock(&dev->struct_mutex);
+
+ crtc->fb = fb;
+
+ mutex_unlock(&dev->struct_mutex);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ exynos_fb->crtc = crtc;
+ if (gem_ob->base.export_dma_buf) {
+ struct dma_buf *buf = gem_ob->base.export_dma_buf;
+ unsigned long shared = 0UL;
+ struct kds_resource *res_list = get_dma_buf_kds_resource(buf);
- if (event) {
/*
- * the pipe from user always is 0 so we can set pipe number
- * of current owner to event.
+ * If we don't already have a reference to the dma_buf,
+ * grab one now. We'll release it in exynos_drm_fb_destroy().
*/
- event->pipe = exynos_crtc->pipe;
+ if (!exynos_fb->dma_buf) {
+ get_dma_buf(buf);
+ exynos_fb->dma_buf = buf;
+ }
+ BUG_ON(exynos_fb->dma_buf != buf);
- ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ /* Waiting for the KDS resource */
+ ret = kds_async_waitall(pkds, KDS_FLAG_LOCKED_WAIT,
+ &dev_priv->kds_cb, fb, pkds, 1,
+ &shared, &res_list);
if (ret) {
- DRM_DEBUG("failed to acquire vblank counter\n");
- goto out;
+ DRM_ERROR("kds_async_waitall failed: %d\n", ret);
+ goto fail_kds;
}
+ } else {
+ /* No kds resources to wait on: apply the flip synchronously. */
+ *pkds = NULL;
+ DRM_ERROR("flipping a non-kds buffer\n");
+ exynos_drm_kds_callback(fb, pkds);
+ }
+ if (event_to_send) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ exynos_drm_crtc_flip_complete(event_to_send);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+#endif
+
+ trace_exynos_flip_request(exynos_crtc->pipe);
- list_add_tail(&event->base.link,
- &dev_priv->pageflip_event_list);
+ return 0;
-#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
- if (dev_priv->old_kds_res_set != NULL)
- kds_resource_set_release(&dev_priv->old_kds_res_set);
- dev_priv->old_kds_res_set = dev_priv->kds_res_set;
+/*
+ * NOTE(review): pkds and flip_in_flight are only declared/used under
+ * CONFIG_DMA_SHARED_BUFFER_USES_KDS, but the fail_kds label below is not
+ * inside any #ifdef — with KDS disabled this will not compile, and
+ * fail_max_in_flight becomes an unused label. Guard the error path (or
+ * the whole function variant) with the same #ifdef; please confirm.
+ */
+fail_kds:
+ *pkds = NULL;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ exynos_crtc->flip_in_flight--;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+fail_max_in_flight:
+ drm_vblank_put(dev, exynos_crtc->pipe);
+ return ret;
+}
- if (dev_priv->old_dma_buf != NULL)
- dma_buf_put(dev_priv->old_dma_buf);
- dev_priv->old_dma_buf = dev_priv->dma_buf;
+/*
+ * exynos_drm_crtc_finish_pageflip - vblank-time completion of a flip.
+ *
+ * Always wakes vsync waiters first; then, only if a flip was actually
+ * armed (flip_pending), signals the stored vblank event, promotes
+ * pending_kds to current_kds (releasing the previous current set outside
+ * event_lock), decrements flip_in_flight and drops the vblank reference
+ * taken in exynos_drm_crtc_page_flip().
+ */
+void exynos_drm_crtc_finish_pageflip(struct drm_device *drm_dev, int crtc_idx)
+{
+ struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->crtc[crtc_idx];
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct kds_resource_set *kds;
+ unsigned long flags;
- if (gem_ob->base.export_dma_buf) {
- struct dma_buf *buf = gem_ob->base.export_dma_buf;
- unsigned long shared[1] = {0};
- struct kds_resource *resource_list[1] = {get_dma_buf_kds_resource(buf)};
+ /* set wait vsync event to zero and wake up queue. */
+ atomic_set(&dev_priv->wait_vsync_event, 0);
+ DRM_WAKEUP(&dev_priv->wait_vsync_queue);
- get_dma_buf(buf);
- dev_priv->dma_buf = buf;
-
- /* Waiting for the KDS resource*/
- kds_async_waitall(&dev_priv->kds_res_set, KDS_FLAG_LOCKED_WAIT,
- &dev_priv->kds_cb, crtc, fb, 1, shared, resource_list);
- } else {
- exynos_drm_kds_callback(crtc, fb);
- dev_priv->kds_res_set = NULL;
- dev_priv->dma_buf = NULL;
- }
-#endif
+ /* Only one completer proceeds: atomically consume the pending flag. */
+ if (!atomic_cmpxchg(&exynos_crtc->flip_pending, 1, 0))
+ return;
+
+ trace_exynos_flip_complete(crtc_idx);
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (exynos_crtc->event) {
+ exynos_drm_crtc_flip_complete(exynos_crtc->event);
+ exynos_crtc->event = NULL;
}
-out:
+ kds = exynos_crtc->current_kds;
+ exynos_crtc->current_kds = exynos_crtc->pending_kds;
+ exynos_crtc->pending_kds = NULL;
+ exynos_crtc->flip_in_flight--;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
- trace_exynos_flip_complete(exynos_crtc->pipe);
- return ret;
+ /* Release the displaced kds set after dropping the spinlock. */
+ if (kds)
+ kds_resource_set_release(&kds);
+
+ drm_vblank_put(drm_dev, crtc_idx);
}
static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)