Merge tag 'topic/core-stuff-2014-12-19' of git://anongit.freedesktop.org/drm-intel...
[cascardo/linux.git] / drivers / gpu / drm / msm / mdp / mdp5 / mdp5_crtc.c
1 /*
2  * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include "mdp5_kms.h"
20
21 #include <linux/sort.h>
22 #include <drm/drm_mode.h>
23 #include "drm_crtc.h"
24 #include "drm_crtc_helper.h"
25 #include "drm_flip_work.h"
26
27 #define SSPP_MAX        (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
28
/* Per-CRTC driver state, embedding the drm_crtc base object. */
struct mdp5_crtc {
        struct drm_crtc base;
        char name[8];           /* e.g. "RGB0:0", used in debug output */
        int id;                 /* CRTC index as passed to mdp5_crtc_init() */
        bool enabled;           /* current DPMS state (true == DPMS_ON) */

        /* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)      ((crtc_id == 3) ? 5 : crtc_id)
        int lm;
        spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

        /* CTL used for this CRTC: */
        struct mdp5_ctl *ctl;

        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;

/* bits for the 'pending' mask below, consumed in the vblank irq */
#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        atomic_t pending;

        struct mdp_irq vblank;  /* one-shot vblank irq, armed by request_pending() */
        struct mdp_irq err;     /* error irq, registered while DPMS_ON */
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
54
55 static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
56 {
57         struct msm_drm_private *priv = crtc->dev->dev_private;
58         return to_mdp5_kms(to_mdp_kms(priv->kms));
59 }
60
61 static void request_pending(struct drm_crtc *crtc, uint32_t pending)
62 {
63         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
64
65         atomic_or(pending, &mdp5_crtc->pending);
66         mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
67 }
68
69 #define mdp5_lm_get_flush(lm)   mdp_ctl_flush_mask_lm(lm)
70
71 static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
72 {
73         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
74
75         DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
76         mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
77 }
78
79 /*
80  * flush updates, to make sure hw is updated to new scanout fb,
81  * so that we can safely queue unref to current fb (ie. next
82  * vblank we know hw is done w/ previous scanout_fb).
83  */
84 static void crtc_flush_all(struct drm_crtc *crtc)
85 {
86         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
87         struct drm_plane *plane;
88         uint32_t flush_mask = 0;
89
90         /* we could have already released CTL in the disable path: */
91         if (!mdp5_crtc->ctl)
92                 return;
93
94         drm_atomic_crtc_for_each_plane(plane, crtc) {
95                 flush_mask |= mdp5_plane_get_flush(plane);
96         }
97         flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
98         flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
99
100         crtc_flush(crtc, flush_mask);
101 }
102
/* Complete (or, when file != NULL, cancel from the preclose path) a
 * pending page flip: send the vblank event, if any, and notify planes.
 */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        struct drm_plane *plane;
        unsigned long flags;

        /* event is produced in mdp5_crtc_atomic_flush() under the same lock */
        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                /* if regular vblank case (!file) or if cancel-flip from
                 * preclose on file that requested flip, then send the
                 * event:
                 */
                if (!file || (event->base.file_priv == file)) {
                        mdp5_crtc->event = NULL;
                        DBG("%s: send event: %p", mdp5_crtc->name, event);
                        drm_send_vblank_event(dev, mdp5_crtc->id, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /* NOTE(review): presumably lets each plane drop its reference to the
         * previous scanout fb — confirm against mdp5_plane_complete_flip().
         */
        drm_atomic_crtc_for_each_plane(plane, crtc) {
                mdp5_plane_complete_flip(plane);
        }
}
131
/* Tear down the CRTC: unregister from the drm core, then free our state. */
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(mdp5_crtc);
}
140
/* DPMS hook: any mode other than DPMS_ON counts as "off".  On enable,
 * mdp clocks are turned on before the error irq is registered; on
 * disable the reverse order is used and all mixer layers are parked.
 */
static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        bool enabled = (mode == DRM_MODE_DPMS_ON);

        DBG("%s: mode=%d", mdp5_crtc->name, mode);

        /* no-op if the state does not actually change */
        if (enabled != mdp5_crtc->enabled) {
                if (enabled) {
                        mdp5_enable(mdp5_kms);
                        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
                } else {
                        /* set STAGE_UNUSED for all layers */
                        mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
                        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
                        mdp5_disable(mdp5_kms);
                }
                mdp5_crtc->enabled = enabled;
        }
}
162
163 static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
164                 const struct drm_display_mode *mode,
165                 struct drm_display_mode *adjusted_mode)
166 {
167         return true;
168 }
169
170 /*
171  * blend_setup() - blend all the planes of a CRTC
172  *
173  * When border is enabled, the border color will ALWAYS be the base layer.
174  * Therefore, the first plane (private RGB pipe) will start at STAGE0.
175  * If disabled, the first plane starts at STAGE_BASE.
176  *
177  * Note:
178  * Border is not enabled here because the private plane is exactly
179  * the CRTC resolution.
180  */
static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
        unsigned long flags;
/* map a mixer stage id to a hw blender index (STAGE_BASE -> 0) */
#define blender(stage)  ((stage) - STAGE_BASE)

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        /* lm_lock serializes all REG_MDP5_LM_* accesses for this mixer */
        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

        /* ctl could be released already when we are shutting down: */
        if (!mdp5_crtc->ctl)
                goto out;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                enum mdp_mixer_stage_id stage =
                        to_mdp5_plane_state(plane->state)->stage;

                /*
                 * Note: This cannot happen with current implementation but
                 * we need to check this condition once z property is added
                 */
                BUG_ON(stage > hw_cfg->lm.nb_stages);

                /* LM: constant-alpha blend, fg fully opaque over bg */
                mdp5_write(mdp5_kms,
                                REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
                                MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
                                blender(stage)), 0xff);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
                                blender(stage)), 0x00);
                /* CTL: accumulate pipe->stage routing for this mixer */
                blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
                DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(plane)), stage);
        }

        DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
        mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
230
231 static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
232 {
233         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
234         struct mdp5_kms *mdp5_kms = get_kms(crtc);
235         unsigned long flags;
236         struct drm_display_mode *mode;
237
238         if (WARN_ON(!crtc->state))
239                 return;
240
241         mode = &crtc->state->adjusted_mode;
242
243         DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
244                         mdp5_crtc->name, mode->base.id, mode->name,
245                         mode->vrefresh, mode->clock,
246                         mode->hdisplay, mode->hsync_start,
247                         mode->hsync_end, mode->htotal,
248                         mode->vdisplay, mode->vsync_start,
249                         mode->vsync_end, mode->vtotal,
250                         mode->type, mode->flags);
251
252         spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
253         mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
254                         MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
255                         MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
256         spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
257 }
258
259 static void mdp5_crtc_prepare(struct drm_crtc *crtc)
260 {
261         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
262         DBG("%s", mdp5_crtc->name);
263         /* make sure we hold a ref to mdp clks while setting up mode: */
264         mdp5_enable(get_kms(crtc));
265         mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
266 }
267
268 static void mdp5_crtc_commit(struct drm_crtc *crtc)
269 {
270         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
271         DBG("%s", mdp5_crtc->name);
272         mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
273         crtc_flush_all(crtc);
274         /* drop the ref to mdp clk's that we got in prepare: */
275         mdp5_disable(get_kms(crtc));
276 }
277
/* Pairing of a plane with its (new or current) state, used to sort
 * the planes attached to a CRTC by zpos in mdp5_crtc_atomic_check().
 */
struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};
282
283 static int pstate_cmp(const void *a, const void *b)
284 {
285         struct plane_state *pa = (struct plane_state *)a;
286         struct plane_state *pb = (struct plane_state *)b;
287         return pa->state->zpos - pb->state->zpos;
288 }
289
/* Atomic check: lazily allocate a CTL for this CRTC, validate the
 * attached plane count against available mixer stages, and assign
 * each plane a stage according to its zpos ordering.
 */
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        struct drm_device *dev = crtc->dev;
        /* bounded by the highest mixer stage we can drive (STAGE3) */
        struct plane_state pstates[STAGE3 + 1];
        int cnt = 0, i;

        DBG("%s: check", mdp5_crtc->name);

        /* request a free CTL, if none is already allocated for this CRTC */
        if (state->enable && !mdp5_crtc->ctl) {
                mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
                if (WARN_ON(!mdp5_crtc->ctl))
                        return -EINVAL;
        }

        /* verify that there are not too many planes attached to crtc
         * and that we don't have conflicting mixer stages:
         */
        drm_atomic_crtc_state_for_each_plane(plane, state) {
                struct drm_plane_state *pstate;

                if (cnt >= ARRAY_SIZE(pstates)) {
                        dev_err(dev->dev, "too many planes!\n");
                        return -EINVAL;
                }

                pstate = state->state->plane_states[drm_plane_index(plane)];

                /* plane might not have changed, in which case take
                 * current state:
                 */
                if (!pstate)
                        pstate = plane->state;

                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);

                cnt++;
        }

        /* sort by zpos, then assign stages bottom-up from STAGE_BASE */
        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

        for (i = 0; i < cnt; i++) {
                pstates[i].state->stage = STAGE_BASE + i;
                DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(pstates[i].plane)),
                                pstates[i].state->stage);
        }

        return 0;
}
345
346 static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
347 {
348         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
349         DBG("%s: begin", mdp5_crtc->name);
350 }
351
/* Atomic flush: stash the completion event (consumed by complete_flip()
 * under the same event_lock), program blending, flush everything to hw,
 * and arm the vblank irq.  If the CRTC is being disabled, release its CTL.
 */
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

        /* a previous flip should already have completed by now */
        WARN_ON(mdp5_crtc->event);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        blend_setup(crtc);
        crtc_flush_all(crtc);
        request_pending(crtc, PENDING_FLIP);

        /* CRTC going away: return the CTL to the pool */
        if (mdp5_crtc->ctl && !crtc->state->enable) {
                mdp5_ctl_release(mdp5_crtc->ctl);
                mdp5_crtc->ctl = NULL;
        }
}
375
376 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
377                 struct drm_property *property, uint64_t val)
378 {
379         // XXX
380         return -EINVAL;
381 }
382
/* CRTC core ops: mostly the atomic helpers, plus our destroy/property hooks. */
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .set_property = mdp5_crtc_set_property,
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
392
/* CRTC helper ops: legacy dpms/prepare/commit plus the atomic check/flush path. */
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .dpms = mdp5_crtc_dpms,
        .mode_fixup = mdp5_crtc_mode_fixup,
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
        .mode_set = drm_helper_crtc_mode_set,
        .mode_set_base = drm_helper_crtc_mode_set_base,
        .prepare = mdp5_crtc_prepare,
        .commit = mdp5_crtc_commit,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
};
405
406 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
407 {
408         struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
409         struct drm_crtc *crtc = &mdp5_crtc->base;
410         unsigned pending;
411
412         mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
413
414         pending = atomic_xchg(&mdp5_crtc->pending, 0);
415
416         if (pending & PENDING_FLIP) {
417                 complete_flip(crtc, NULL);
418         }
419 }
420
421 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
422 {
423         struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
424
425         DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
426 }
427
428 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
429 {
430         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
431         return mdp5_crtc->vblank.irqmask;
432 }
433
/* Preclose path: cancel a flip that was requested by @file, if any. */
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);

	complete_flip(crtc, file);
}
439
/* set interface for routing crtc->encoder: programs the shared
 * DISP_INTF_SEL register (under resource_lock), updates this CRTC's
 * err/vblank irq masks, points the CTL at the interface, and flushes.
 */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
                enum mdp5_intf intf_id)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        uint32_t flush_mask = 0;
        uint32_t intf_sel;
        unsigned long flags;

        /* now that we know what irq's we want: */
        mdp5_crtc->err.irqmask = intf2err(intf);
        mdp5_crtc->vblank.irqmask = intf2vblank(intf);
        mdp_irq_update(&mdp5_kms->base);

        /* DISP_INTF_SEL is shared between CRTCs: read-modify-write under lock */
        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

        switch (intf) {
        case 0:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
                break;
        case 1:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
                break;
        case 2:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
                break;
        case 3:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
                break;
        default:
                /* only interfaces 0-3 exist; anything else is a driver bug */
                BUG();
                break;
        }

        mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
        spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

        DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
        mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
        flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
        flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

        crtc_flush(crtc, flush_mask);
}
490
491 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
492 {
493         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
494
495         if (WARN_ON(!crtc))
496                 return -EINVAL;
497
498         return mdp5_crtc->lm;
499 }
500
501 /* initialize crtc */
502 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
503                 struct drm_plane *plane, int id)
504 {
505         struct drm_crtc *crtc = NULL;
506         struct mdp5_crtc *mdp5_crtc;
507
508         mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
509         if (!mdp5_crtc)
510                 return ERR_PTR(-ENOMEM);
511
512         crtc = &mdp5_crtc->base;
513
514         mdp5_crtc->id = id;
515         mdp5_crtc->lm = GET_LM_ID(id);
516
517         spin_lock_init(&mdp5_crtc->lm_lock);
518
519         mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
520         mdp5_crtc->err.irq = mdp5_crtc_err_irq;
521
522         snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
523                         pipe2name(mdp5_plane_pipe(plane)), id);
524
525         drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
526         drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
527         plane->crtc = crtc;
528
529         mdp5_plane_install_properties(plane, &crtc->base);
530
531         return crtc;
532 }