2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
8 #include <linux/seq_file.h>
10 #include <drm/drm_atomic.h>
11 #include <drm/drm_fb_cma_helper.h>
12 #include <drm/drm_gem_cma_helper.h>
14 #include "sti_compositor.h"
16 #include "sti_plane.h"
/* GAM_GDP_CTL register bit fields */
#define ALPHASWITCH BIT(6)
#define ENA_COLOR_FILL BIT(8)
#define BIGNOTLITTLE BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)	/* defer node load until the next VSYNC */

/* GDP color formats (low 5 bits of CTL; see gdp_dbg_ctl()) */
#define GDP_RGB565 0x00
#define GDP_RGB888 0x01
#define GDP_RGB888_32 0x02
#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565 0x04
#define GDP_ARGB8888 0x05
#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
/* Expand a GDP_<fmt> constant into a { value, "NAME" } table entry */
#define GDP2STR(fmt) { GDP_ ## fmt, #fmt }

/* Lookup table mapping GDP hardware format codes to printable names
 * (fields and entries are elided in this view). */
static struct gdp_format_to_str {
} gdp_format_to_str[] = {
/* GDP register offsets, relative to the GDP instance base address */
#define GAM_GDP_CTL_OFFSET 0x00
#define GAM_GDP_AGC_OFFSET 0x04
#define GAM_GDP_VPO_OFFSET 0x0C
#define GAM_GDP_VPS_OFFSET 0x10
#define GAM_GDP_PML_OFFSET 0x14
#define GAM_GDP_PMP_OFFSET 0x18
#define GAM_GDP_SIZE_OFFSET 0x1C
#define GAM_GDP_NVN_OFFSET 0x24
#define GAM_GDP_KEY1_OFFSET 0x28
#define GAM_GDP_KEY2_OFFSET 0x2C
#define GAM_GDP_PPT_OFFSET 0x34
#define GAM_GDP_CML_OFFSET 0x3C
#define GAM_GDP_MST_OFFSET 0x68

#define GAM_GDP_ALPHARANGE_255 BIT(5)	/* full 0-255 alpha range (see sti_gdp_get_alpharange()) */
#define GAM_GDP_AGC_FULL_RANGE 0x00808080
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))	/* node not displayed on the mixer */
#define GAM_GDP_SIZE_MAX 0x7FF	/* max src width/height accepted by the HW */

/* Two banks of nodes, each bank holding one top-field and one
 * bottom-field node (double buffering for flicker-free updates). */
#define GDP_NODE_NB_BANK 2
#define GDP_NODE_PER_FIELD 2
/*
 * struct sti_gdp_node_list - one bank of GDP nodes (top + bottom field)
 * @top_field:       CPU pointer to the top-field node
 * @top_field_paddr: DMA address of the top-field node, as seen by the HW
 * @btm_field:       CPU pointer to the bottom-field node
 * @btm_field_paddr: DMA address of the bottom-field node
 */
struct sti_gdp_node_list {
	struct sti_gdp_node *top_field;
	dma_addr_t top_field_paddr;
	struct sti_gdp_node *btm_field;
	dma_addr_t btm_field_paddr;
/**
 * STI GDP (Graphics Display Pipeline) plane structure
 *
 * @sti_plane:       sti_plane structure
 * @dev:             driver device
 * @regs:            gdp registers
 * @clk_pix:         pixel clock for the current gdp
 * @clk_main_parent: gdp parent clock if main path used
 * @clk_aux_parent:  gdp parent clock if aux path used
 * @vtg_field_nb:    callback for VTG FIELD (top or bottom) notification
 * @is_curr_top:     true if the current node processed is the top field
 * @node_list:       array of node list (one entry per bank)
 * @vtg:             registered vtg
 */
	struct sti_plane plane;
	struct clk *clk_main_parent;
	struct clk *clk_aux_parent;
	struct notifier_block vtg_field_nb;
	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];

/* Retrieve the enclosing struct sti_gdp from its embedded sti_plane */
#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
/* DRM fourcc formats handled by the GDP hardware (entries elided here) */
static const uint32_t gdp_supported_formats[] = {

/* Dump one GDP register to the seq_file: name followed by raw value */
#define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
				   readl(gdp->regs + reg ## _OFFSET))
143 static void gdp_dbg_ctl(struct seq_file *s, int val)
147 seq_puts(s, "\tColor:");
148 for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
149 if (gdp_format_to_str[i].format == (val & 0x1F)) {
150 seq_printf(s, gdp_format_to_str[i].name);
154 if (i == ARRAY_SIZE(gdp_format_to_str))
155 seq_puts(s, "<UNKNOWN>");
157 seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
/* Decode the VPO (viewport origin) register: xdo in the low 16 bits,
 * ydo in the high 16 bits. */
static void gdp_dbg_vpo(struct seq_file *s, int val)
{
	int xdo = val & 0xFFFF;
	int ydo = (val >> 16) & 0xFFFF;

	seq_printf(s, "\txdo:%4d\tydo:%4d", xdo, ydo);
}
/* Decode the VPS (viewport stop) register: xds in the low 16 bits,
 * yds in the high 16 bits. */
static void gdp_dbg_vps(struct seq_file *s, int val)
{
	int xds = val & 0xFFFF;
	int yds = (val >> 16) & 0xFFFF;

	seq_printf(s, "\txds:%4d\tyds:%4d", xds, yds);
}
/* Decode the SIZE register: width in the low 16 bits, height in the
 * high 16 bits. */
static void gdp_dbg_size(struct seq_file *s, int val)
{
	int width = val & 0xFFFF;
	int height = (val >> 16) & 0xFFFF;

	seq_printf(s, "\t%d x %d", width, height);
}
175 static void gdp_dbg_nvn(struct seq_file *s, struct sti_gdp *gdp, int val)
180 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
181 if (gdp->node_list[i].top_field_paddr == val) {
182 base = gdp->node_list[i].top_field;
185 if (gdp->node_list[i].btm_field_paddr == val) {
186 base = gdp->node_list[i].btm_field;
192 seq_printf(s, "\tVirt @: %p", base);
195 static void gdp_dbg_ppt(struct seq_file *s, int val)
197 if (val & GAM_GDP_PPT_IGNORE)
198 seq_puts(s, "\tNot displayed on mixer!");
/*
 * gdp_dbg_mst - decode the MST (status) register for debugfs output
 * @s: seq_file used by the debugfs entry
 * @val: raw value of the GAM_GDP_MST register
 *
 * NOTE(review): the condition testing which MST bit flags an underflow
 * is not visible in this chunk — confirm before relying on this output.
 */
static void gdp_dbg_mst(struct seq_file *s, int val)
	seq_puts(s, "\tBUFFER UNDERFLOW!");
/*
 * gdp_dbg_show - debugfs entry: dump and decode all GDP hardware registers
 * @s: seq_file for output
 * @data: unused (the GDP instance comes from the drm_info_node)
 *
 * Return: 0 on success; the mutex_lock_interruptible() error path is
 * elided in this view.
 */
static int gdp_dbg_show(struct seq_file *s, void *data)
	struct drm_info_node *node = s->private;
	/* the GDP instance was stored in .data by gdp_debugfs_init() */
	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
	struct drm_crtc *crtc = drm_plane->crtc;

	/* serialize against concurrent plane updates */
	ret = mutex_lock_interruptible(&dev->struct_mutex);

	seq_printf(s, "%s: (vaddr = 0x%p)",
		   sti_plane_to_str(&gdp->plane), gdp->regs);

	/* raw dump of each register, with a decoded view for packed fields */
	DBGFS_DUMP(GAM_GDP_CTL);
	gdp_dbg_ctl(s, readl(gdp->regs + GAM_GDP_CTL_OFFSET));
	DBGFS_DUMP(GAM_GDP_AGC);
	DBGFS_DUMP(GAM_GDP_VPO);
	gdp_dbg_vpo(s, readl(gdp->regs + GAM_GDP_VPO_OFFSET));
	DBGFS_DUMP(GAM_GDP_VPS);
	gdp_dbg_vps(s, readl(gdp->regs + GAM_GDP_VPS_OFFSET));
	DBGFS_DUMP(GAM_GDP_PML);
	DBGFS_DUMP(GAM_GDP_PMP);
	DBGFS_DUMP(GAM_GDP_SIZE);
	gdp_dbg_size(s, readl(gdp->regs + GAM_GDP_SIZE_OFFSET));
	DBGFS_DUMP(GAM_GDP_NVN);
	gdp_dbg_nvn(s, gdp, readl(gdp->regs + GAM_GDP_NVN_OFFSET));
	DBGFS_DUMP(GAM_GDP_KEY1);
	DBGFS_DUMP(GAM_GDP_KEY2);
	DBGFS_DUMP(GAM_GDP_PPT);
	gdp_dbg_ppt(s, readl(gdp->regs + GAM_GDP_PPT_OFFSET));
	DBGFS_DUMP(GAM_GDP_CML);
	DBGFS_DUMP(GAM_GDP_MST);
	gdp_dbg_mst(s, readl(gdp->regs + GAM_GDP_MST_OFFSET));

	/* report which CRTC (mixer) this plane is currently attached to */
	seq_puts(s, "  Not connected to any DRM CRTC\n");
	seq_printf(s, "  Connected to DRM CRTC #%d (%s)\n",
		   crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));

	mutex_unlock(&dev->struct_mutex);
/*
 * gdp_node_dump_node - dump one GDP node (a HW DMA descriptor) to debugfs
 * @s: seq_file for output
 * @node: CPU-visible pointer to the node to dump
 *
 * Prints each node field in raw hex, with a decoded view for the
 * packed fields (CTL, VPO, VPS, SIZE, PPT).
 */
static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
	seq_printf(s, "\t@:0x%p", node);
	seq_printf(s, "\n\tCTL  0x%08X", node->gam_gdp_ctl);
	gdp_dbg_ctl(s, node->gam_gdp_ctl);
	seq_printf(s, "\n\tAGC  0x%08X", node->gam_gdp_agc);
	seq_printf(s, "\n\tVPO  0x%08X", node->gam_gdp_vpo);
	gdp_dbg_vpo(s, node->gam_gdp_vpo);
	seq_printf(s, "\n\tVPS  0x%08X", node->gam_gdp_vps);
	gdp_dbg_vps(s, node->gam_gdp_vps);
	seq_printf(s, "\n\tPML  0x%08X", node->gam_gdp_pml);
	seq_printf(s, "\n\tPMP  0x%08X", node->gam_gdp_pmp);
	seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
	gdp_dbg_size(s, node->gam_gdp_size);
	seq_printf(s, "\n\tNVN  0x%08X", node->gam_gdp_nvn);
	seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
	seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
	seq_printf(s, "\n\tPPT  0x%08X", node->gam_gdp_ppt);
	gdp_dbg_ppt(s, node->gam_gdp_ppt);
	seq_printf(s, "\n\tCML  0x%08X", node->gam_gdp_cml);
/*
 * gdp_node_dbg_show - debugfs entry: dump every node of every GDP bank
 * @s: seq_file for output
 * @arg: unused (the GDP instance comes from the drm_info_node)
 *
 * Return: 0 on success; the mutex_lock_interruptible() error path is
 * elided in this view.
 */
static int gdp_node_dbg_show(struct seq_file *s, void *arg)
	struct drm_info_node *node = s->private;
	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;

	/* serialize against concurrent plane updates */
	ret = mutex_lock_interruptible(&dev->struct_mutex);

	/* dump the top and bottom field nodes of each bank */
	for (b = 0; b < GDP_NODE_NB_BANK; b++) {
		seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
		gdp_node_dump_node(s, gdp->node_list[b].top_field);
		seq_printf(s, "\n%s[%d].btm", sti_plane_to_str(&gdp->plane), b);
		gdp_node_dump_node(s, gdp->node_list[b].btm_field);

	mutex_unlock(&dev->struct_mutex);
/* debugfs file tables, one set per GDP instance; the .data field of each
 * entry is filled with the matching struct sti_gdp by gdp_debugfs_init(). */
static struct drm_info_list gdp0_debugfs_files[] = {
	{ "gdp0", gdp_dbg_show, 0, NULL },
	{ "gdp0_node", gdp_node_dbg_show, 0, NULL },

static struct drm_info_list gdp1_debugfs_files[] = {
	{ "gdp1", gdp_dbg_show, 0, NULL },
	{ "gdp1_node", gdp_node_dbg_show, 0, NULL },

static struct drm_info_list gdp2_debugfs_files[] = {
	{ "gdp2", gdp_dbg_show, 0, NULL },
	{ "gdp2_node", gdp_node_dbg_show, 0, NULL },

static struct drm_info_list gdp3_debugfs_files[] = {
	{ "gdp3", gdp_dbg_show, 0, NULL },
	{ "gdp3_node", gdp_node_dbg_show, 0, NULL },
/*
 * gdp_debugfs_init - register the debugfs entries for one GDP instance
 * @gdp: GDP plane
 * @minor: DRM minor under which the files are created
 *
 * Selects the per-instance file table from the plane descriptor, points
 * each entry's .data at this GDP, then registers the files with the DRM
 * debugfs core. The case labels of the switch are elided in this view.
 *
 * Return: the drm_debugfs_create_files() result.
 */
static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
	struct drm_info_list *gdp_debugfs_files;

	switch (gdp->plane.desc) {
		gdp_debugfs_files = gdp0_debugfs_files;
		nb_files = ARRAY_SIZE(gdp0_debugfs_files);
		gdp_debugfs_files = gdp1_debugfs_files;
		nb_files = ARRAY_SIZE(gdp1_debugfs_files);
		gdp_debugfs_files = gdp2_debugfs_files;
		nb_files = ARRAY_SIZE(gdp2_debugfs_files);
		gdp_debugfs_files = gdp3_debugfs_files;
		nb_files = ARRAY_SIZE(gdp3_debugfs_files);

	/* attach this GDP instance to each debugfs entry */
	for (i = 0; i < nb_files; i++)
		gdp_debugfs_files[i].data = gdp;

	return drm_debugfs_create_files(gdp_debugfs_files,
					minor->debugfs_root, minor);
/*
 * sti_gdp_fourcc2format - map a DRM fourcc to the GDP hardware format code
 * @fourcc: DRM pixel format (DRM_FORMAT_*)
 *
 * Return: a GDP_* hardware format code. The return statements for most
 * cases and the unsupported-format default are elided in this view;
 * callers treat negative values as 'unsupported'.
 */
static int sti_gdp_fourcc2format(int fourcc)
	case DRM_FORMAT_XRGB8888:
		return GDP_RGB888_32;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGB888:
/*
 * sti_gdp_get_alpharange - CTL alpha-range bit for a given format
 * @format: GDP_* hardware format code
 *
 * Return: GAM_GDP_ALPHARANGE_255 for formats using a full 0-255 alpha
 * range; the matching case labels and the default return are elided in
 * this view.
 */
static int sti_gdp_get_alpharange(int format)
		return GAM_GDP_ALPHARANGE_255;
/**
 * sti_gdp_get_free_nodes
 * @gdp: gdp pointer
 *
 * Look for a GDP node list that is not currently read by the HW.
 *
 * RETURNS:
 * Pointer to the free GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);

	/* a bank is free when the HW 'next node' register points at
	 * neither of its two fields */
	for (i = 0; i < GDP_NODE_NB_BANK; i++)
		if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
		    (hw_nvn != gdp->node_list[i].top_field_paddr))
			return &gdp->node_list[i];

	/* in hazardous cases restart with the first node */
	DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
		  sti_plane_to_str(&gdp->plane), hw_nvn);

	return &gdp->node_list[0];
/**
 * sti_gdp_get_current_nodes
 * @gdp: gdp pointer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list
 */
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);

	/* the current bank is the one whose top or bottom field the HW
	 * 'next node' register points at */
	for (i = 0; i < GDP_NODE_NB_BANK; i++)
		if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
		    (hw_nvn == gdp->node_list[i].top_field_paddr))
			return &gdp->node_list[i];

	DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
			 hw_nvn, sti_plane_to_str(&gdp->plane));
/*
 * sti_gdp_disable - disable a GDP plane
 * @gdp: gdp pointer
 *
 * Marks every node 'to be ignored by the mixer', unregisters the VTG
 * field notifier and gates the pixel clock, then flags the plane as
 * disabled.
 */
static void sti_gdp_disable(struct sti_gdp *gdp)
	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));

	/* Set the nodes as 'to be ignored on mixer' */
	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;

	if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

	clk_disable_unprepare(gdp->clk_pix);

	gdp->plane.status = STI_PLANE_DISABLED;
/**
 * sti_gdp_field_cb - VTG field notifier callback
 * @nb: notifier block
 * @event: event message
 * @data: private data
 *
 * Handle VTG top field and bottom field event.
 */
int sti_gdp_field_cb(struct notifier_block *nb,
		     unsigned long event, void *data)
	struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);

	/* a pending disable is completed here, synchronized on vsync */
	if (gdp->plane.status == STI_PLANE_FLUSHING) {
		/* disable need to be synchronize on vsync event */
		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
				 sti_plane_to_str(&gdp->plane));

		sti_gdp_disable(gdp);

	/* track which field the HW is currently processing; the switch
	 * statement itself is elided in this view */
	case VTG_TOP_FIELD_EVENT:
		gdp->is_curr_top = true;
	case VTG_BOTTOM_FIELD_EVENT:
		gdp->is_curr_top = false;
		DRM_ERROR("unsupported event: %lu\n", event);
/*
 * sti_gdp_init - allocate the GDP node banks and acquire the pixel clocks
 * @gdp: gdp pointer
 *
 * All nodes are carved out of a single DMA-coherent allocation; each
 * node must be 16-byte aligned for the hardware. On STiH407 each GDP
 * also has its own pixel clock with selectable main/aux parents.
 */
static void sti_gdp_init(struct sti_gdp *gdp)
	struct device_node *np = gdp->dev->of_node;
	unsigned int i, size;

	/* Allocate all the nodes within a single memory page */
	size = sizeof(struct sti_gdp_node) *
	       GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
	base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
		DRM_ERROR("Failed to allocate memory for GDP node\n");

	memset(base, 0, size);

	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		/* the hardware requires 16-byte aligned node addresses */
		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
		gdp->node_list[i].top_field = base;
		gdp->node_list[i].top_field_paddr = dma_addr;

		DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);

		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
		gdp->node_list[i].btm_field = base;
		gdp->node_list[i].btm_field_paddr = dma_addr;
		DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);

	if (of_device_is_compatible(np, "st,stih407-compositor")) {
		/* GDP of STiH407 chip have its own pixel clock */

		/* pick the clock name matching this GDP instance; case
		 * labels are elided in this view */
		switch (gdp->plane.desc) {
			clk_name = "pix_gdp1";
			clk_name = "pix_gdp2";
			clk_name = "pix_gdp3";
			clk_name = "pix_gdp4";
			DRM_ERROR("GDP id not recognized\n");

		gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
		if (IS_ERR(gdp->clk_pix))
			DRM_ERROR("Cannot get %s clock\n", clk_name);

		gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
		if (IS_ERR(gdp->clk_main_parent))
			DRM_ERROR("Cannot get main_parent clock\n");

		gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
		if (IS_ERR(gdp->clk_aux_parent))
			DRM_ERROR("Cannot get aux_parent clock\n");
/**
 * sti_gdp_get_dst - crop/clamp a destination size (no scaling support)
 * @dev: device (for dev_dbg output)
 * @dst: requested destination size
 * @src: source size
 *
 * Return the cropped / clamped destination size.
 * NOTE(review): the conditions selecting crop vs clamp are elided in
 * this view — confirm against the full source.
 */
static int sti_gdp_get_dst(struct device *dev, int dst, int src)
	dev_dbg(dev, "WARNING: GDP scale not supported, will crop\n");
	dev_dbg(dev, "WARNING: GDP scale not supported, will clamp\n");
/*
 * sti_gdp_atomic_check - atomic helper: validate a proposed plane state
 * @drm_plane: GDP drm plane
 * @state: new plane state to check
 *
 * Validates pixel format, framebuffer backing and geometry; on a first
 * enable it also registers the VTG notifier and configures/enables the
 * GDP pixel clock. Error-return statements are elided in this view.
 */
static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
				struct drm_plane_state *state)
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_gdp *gdp = to_sti_gdp(plane);
	struct drm_crtc *crtc = state->crtc;
	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
	struct drm_framebuffer *fb = state->fb;
	/* first enable since the plane was last disabled */
	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
	struct drm_crtc_state *crtc_state;
	struct sti_mixer *mixer;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;

	/* no need for further checks if the plane is being disabled */

	mixer = to_sti_mixer(crtc);
	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
	mode = &crtc_state->mode;
	/* destination rectangle, clamped to the active display area */
	dst_x = state->crtc_x;
	dst_y = state->crtc_y;
	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = state->src_x >> 16;
	src_y = state->src_y >> 16;
	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);

	format = sti_gdp_fourcc2format(fb->pixel_format);
		DRM_ERROR("Format not supported by GDP %.4s\n",
			  (char *)&fb->pixel_format);

	if (!drm_fb_cma_get_gem_obj(fb, 0)) {
		DRM_ERROR("Can't get CMA GEM object for fb\n");

	/* Register gdp callback */
	gdp->vtg = mixer->id == STI_MIXER_MAIN ?
	    compo->vtg_main : compo->vtg_aux;
	if (sti_vtg_register_client(gdp->vtg,
				    &gdp->vtg_field_nb, crtc)) {
		DRM_ERROR("Cannot register VTG notifier\n");

	/* Set and enable gdp clock */
	int rate = mode->clock * 1000;

	/*
	 * According to the mixer used, the gdp pixel clock
	 * should have a different parent clock.
	 */
	if (mixer->id == STI_MIXER_MAIN)
		clkp = gdp->clk_main_parent;
		clkp = gdp->clk_aux_parent;

	clk_set_parent(gdp->clk_pix, clkp);

	res = clk_set_rate(gdp->clk_pix, rate);
		DRM_ERROR("Cannot set rate (%dHz) for gdp\n",

	if (clk_prepare_enable(gdp->clk_pix)) {
		DRM_ERROR("Failed to prepare/enable gdp\n");

	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
		      crtc->base.id, sti_mixer_to_str(mixer),
		      drm_plane->base.id, sti_plane_to_str(plane));
	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
		      sti_plane_to_str(plane),
		      dst_w, dst_h, dst_x, dst_y,
		      src_w, src_h, src_x, src_y);
/*
 * sti_gdp_atomic_update - atomic helper: program the next GDP node list
 * @drm_plane: GDP drm plane
 * @oldstate: previous plane state
 *
 * Builds the top/bottom field nodes in a free bank, then links them into
 * the hardware node chain so the new frame is taken into account at the
 * next VSYNC. Several lines (e.g. the 'mode' assignment and some branch
 * skeletons) are elided in this view.
 */
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
				  struct drm_plane_state *oldstate)
	struct drm_plane_state *state = drm_plane->state;
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_gdp *gdp = to_sti_gdp(plane);
	struct drm_crtc *crtc = state->crtc;
	struct drm_framebuffer *fb = state->fb;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;
	struct drm_gem_cma_object *cma_obj;
	struct sti_gdp_node_list *list;
	struct sti_gdp_node_list *curr_list;
	struct sti_gdp_node *top_field, *btm_field;
	unsigned int depth, bpp;
	u32 ydo, xdo, yds, xds;

	/* destination rectangle, clamped to the active display area */
	dst_x = state->crtc_x;
	dst_y = state->crtc_y;
	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = state->src_x >> 16;
	src_y = state->src_y >> 16;
	src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
	src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);

	/* pick a node bank the HW is not currently reading */
	list = sti_gdp_get_free_nodes(gdp);
	top_field = list->top_field;
	btm_field = list->btm_field;

	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
		sti_plane_to_str(plane), top_field, btm_field);

	/* build the top field */
	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
	format = sti_gdp_fourcc2format(fb->pixel_format);
	top_field->gam_gdp_ctl |= format;
	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
	/* make sure the node is displayed by the mixer */
	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);

	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
			 (char *)&fb->pixel_format,
			 (unsigned long)cma_obj->paddr);

	/* pixel memory location */
	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
	top_field->gam_gdp_pml += src_x * (bpp >> 3);
	top_field->gam_gdp_pml += src_y * fb->pitches[0];

	/* output parameters (clamped / cropped) */
	dst_w = sti_gdp_get_dst(gdp->dev, dst_w, src_w);
	dst_h = sti_gdp_get_dst(gdp->dev, dst_h, src_h);
	ydo = sti_vtg_get_line_number(*mode, dst_y);
	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
	top_field->gam_gdp_vps = (yds << 16) | xds;

	/* input parameters */
	top_field->gam_gdp_pmp = fb->pitches[0];
	top_field->gam_gdp_size = src_h << 16 | src_w;

	/* Same content and chained together */
	memcpy(btm_field, top_field, sizeof(*btm_field));
	top_field->gam_gdp_nvn = list->btm_field_paddr;
	btm_field->gam_gdp_nvn = list->top_field_paddr;

	/* Interlaced mode */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +

	/* Update the NVN field of the 'right' field of the current GDP node
	 * (being used by the HW) with the address of the updated ('free') top
	 * field:
	 * - In interlaced mode the 'right' field is the bottom field as we
	 *   update frames starting from their top field
	 * - In progressive mode, we update both bottom and top fields which
	 *   are equal nodes.
	 * At the next VSYNC, the updated node list will be used by the HW.
	 */
	curr_list = sti_gdp_get_current_nodes(gdp);
	dma_updated_top = list->top_field_paddr;
	dma_updated_btm = list->btm_field_paddr;

	dev_dbg(gdp->dev, "Current NVN:0x%X\n",
		readl(gdp->regs + GAM_GDP_NVN_OFFSET));
	dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
		(unsigned long)cma_obj->paddr,
		readl(gdp->regs + GAM_GDP_PML_OFFSET));

	/* First update or invalid node should directly write in the
	 * hardware register: there is no pending node to hook onto */
	DRM_DEBUG_DRIVER("%s first update (or invalid node)",
			 sti_plane_to_str(plane));

	writel(gdp->is_curr_top ?
	       dma_updated_btm : dma_updated_top,
	       gdp->regs + GAM_GDP_NVN_OFFSET);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (gdp->is_curr_top) {
			/* Do not update in the middle of the frame, but
			 * postpone the update after the bottom field has
			 * been displayed */
			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
			/* Direct update to avoid one frame delay */
			writel(dma_updated_top,
			       gdp->regs + GAM_GDP_NVN_OFFSET);
		/* Direct update for progressive to avoid one frame delay */
		writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);

	sti_plane_update_fps(plane, true, false);

	plane->status = STI_PLANE_UPDATED;
/*
 * sti_gdp_atomic_disable - atomic helper: request plane disable
 * @drm_plane: GDP drm plane
 * @oldstate: previous plane state
 *
 * The disable is deferred: status is set to STI_PLANE_DISABLING and
 * sti_gdp_field_cb() completes the disable on the next VSYNC.
 */
static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
				   struct drm_plane_state *oldstate)
	struct sti_plane *plane = to_sti_plane(drm_plane);

	/* nothing to do if the plane was never attached to a CRTC */
	if (!drm_plane->crtc) {
		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",

	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
			 drm_plane->crtc->base.id,
			 sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
			 drm_plane->base.id, sti_plane_to_str(plane));

	plane->status = STI_PLANE_DISABLING;
/* Atomic plane helper callbacks for the GDP plane */
static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
	.atomic_check = sti_gdp_atomic_check,
	.atomic_update = sti_gdp_atomic_update,
	.atomic_disable = sti_gdp_atomic_disable,
/*
 * sti_gdp_create - allocate and register one GDP DRM plane
 * @drm_dev: DRM device
 * @dev: driver device
 * @desc: sti plane descriptor identifying this GDP instance
 * @baseaddr: mapped base address of the GDP registers
 * @possible_crtcs: bitmask of CRTCs the plane may be attached to
 * @type: DRM plane type (primary/overlay)
 *
 * Return: the new drm_plane on success; the failure path (partly elided
 * here) frees the allocation and presumably returns NULL — confirm
 * against the full source.
 */
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
				 struct device *dev, int desc,
				 void __iomem *baseaddr,
				 unsigned int possible_crtcs,
				 enum drm_plane_type type)
	gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
		DRM_ERROR("Failed to allocate memory for GDP\n");

	gdp->regs = baseaddr;
	gdp->plane.desc = desc;
	gdp->plane.status = STI_PLANE_DISABLED;

	/* VTG field notifications drive flip and disable synchronization */
	gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;

	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
				       &sti_plane_helpers_funcs,
				       gdp_supported_formats,
				       ARRAY_SIZE(gdp_supported_formats),
		DRM_ERROR("Failed to initialize universal plane\n");

	drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);

	sti_plane_init_property(&gdp->plane, type);

	if (gdp_debugfs_init(gdp, drm_dev->primary))
		DRM_ERROR("GDP debugfs setup failed\n");

	return &gdp->plane.drm_plane;

	devm_kfree(dev, gdp);