Merge tag 'at91-ab-4.8-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/abelloni...
[cascardo/linux.git] / drivers / gpu / drm / drm_fb_cma_helper.c
1 /*
2  * drm kms/fb cma (contiguous memory allocator) helper functions
3  *
4  * Copyright (C) 2012 Analog Device Inc.
5  *   Author: Lars-Peter Clausen <lars@metafoo.de>
6  *
7  * Based on udl_fbdev.c
8  *  Copyright (C) 2012 Red Hat
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <drm/drmP.h>
21 #include <drm/drm_crtc.h>
22 #include <drm/drm_fb_helper.h>
23 #include <drm/drm_crtc_helper.h>
24 #include <drm/drm_gem_cma_helper.h>
25 #include <drm/drm_fb_cma_helper.h>
26 #include <linux/module.h>
27
28 #define DEFAULT_FBDEFIO_DELAY_MS 50
29
/*
 * CMA-backed framebuffer: wraps a drm_framebuffer together with the GEM CMA
 * object backing each plane (up to 4 planes, e.g. for planar YUV formats).
 */
struct drm_fb_cma {
	struct drm_framebuffer		fb;
	struct drm_gem_cma_object	*obj[4];	/* one backing object per plane; unused slots are NULL */
};
34
/*
 * fbdev emulation state: the generic fb helper plus the single CMA
 * framebuffer allocated for the fbdev console.
 */
struct drm_fbdev_cma {
	struct drm_fb_helper	fb_helper;
	struct drm_fb_cma	*fb;	/* set by fb_probe(); NULL until then */
};
39
40 /**
41  * DOC: framebuffer cma helper functions
42  *
43  * Provides helper functions for creating a cma (contiguous memory allocator)
44  * backed framebuffer.
45  *
46  * drm_fb_cma_create() is used in the &drm_mode_config_funcs ->fb_create
47  * callback function to create a cma backed framebuffer.
48  *
49  * An fbdev framebuffer backed by cma is also available by calling
50  * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
51  * If the &drm_framebuffer_funcs ->dirty callback is set, fb_deferred_io
52  * will be set up automatically. dirty() is called by
53  * drm_fb_helper_deferred_io() in process context (struct delayed_work).
54  *
55  * Example fbdev deferred io code:
56  *
57  *     static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
58  *                                      struct drm_file *file_priv,
59  *                                      unsigned flags, unsigned color,
60  *                                      struct drm_clip_rect *clips,
61  *                                      unsigned num_clips)
62  *     {
63  *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
64  *         ... push changes ...
65  *         return 0;
66  *     }
67  *
68  *     static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
69  *         .destroy       = drm_fb_cma_destroy,
70  *         .create_handle = drm_fb_cma_create_handle,
71  *         .dirty         = driver_fbdev_fb_dirty,
72  *     };
73  *
74  *     static int driver_fbdev_create(struct drm_fb_helper *helper,
75  *             struct drm_fb_helper_surface_size *sizes)
76  *     {
77  *         return drm_fbdev_cma_create_with_funcs(helper, sizes,
78  *                                                &driver_fbdev_fb_funcs);
79  *     }
80  *
81  *     static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
82  *         .fb_probe = driver_fbdev_create,
83  *     };
84  *
85  *     Initialize:
86  *     fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
87  *                                           dev->mode_config.num_crtc,
88  *                                           dev->mode_config.num_connector,
89  *                                           &driver_fb_helper_funcs);
90  *
91  */
92
93 static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
94 {
95         return container_of(helper, struct drm_fbdev_cma, fb_helper);
96 }
97
98 static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
99 {
100         return container_of(fb, struct drm_fb_cma, fb);
101 }
102
103 void drm_fb_cma_destroy(struct drm_framebuffer *fb)
104 {
105         struct drm_fb_cma *fb_cma = to_fb_cma(fb);
106         int i;
107
108         for (i = 0; i < 4; i++) {
109                 if (fb_cma->obj[i])
110                         drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
111         }
112
113         drm_framebuffer_cleanup(fb);
114         kfree(fb_cma);
115 }
116 EXPORT_SYMBOL(drm_fb_cma_destroy);
117
118 int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
119         struct drm_file *file_priv, unsigned int *handle)
120 {
121         struct drm_fb_cma *fb_cma = to_fb_cma(fb);
122
123         return drm_gem_handle_create(file_priv,
124                         &fb_cma->obj[0]->base, handle);
125 }
126 EXPORT_SYMBOL(drm_fb_cma_create_handle);
127
/* Default framebuffer ops for CMA fbs: no dirty() callback, so no deferred io. */
static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
	.destroy	= drm_fb_cma_destroy,
	.create_handle	= drm_fb_cma_create_handle,
};
132
133 static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
134         const struct drm_mode_fb_cmd2 *mode_cmd,
135         struct drm_gem_cma_object **obj,
136         unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
137 {
138         struct drm_fb_cma *fb_cma;
139         int ret;
140         int i;
141
142         fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
143         if (!fb_cma)
144                 return ERR_PTR(-ENOMEM);
145
146         drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
147
148         for (i = 0; i < num_planes; i++)
149                 fb_cma->obj[i] = obj[i];
150
151         ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
152         if (ret) {
153                 dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
154                 kfree(fb_cma);
155                 return ERR_PTR(ret);
156         }
157
158         return fb_cma;
159 }
160
161 /**
162  * drm_fb_cma_create_with_funcs() - helper function for the
163  *                                  &drm_mode_config_funcs ->fb_create
164  *                                  callback function
165  *
166  * This can be used to set &drm_framebuffer_funcs for drivers that need the
167  * dirty() callback. Use drm_fb_cma_create() if you don't need to change
168  * &drm_framebuffer_funcs.
169  */
170 struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
171         struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
172         const struct drm_framebuffer_funcs *funcs)
173 {
174         struct drm_fb_cma *fb_cma;
175         struct drm_gem_cma_object *objs[4];
176         struct drm_gem_object *obj;
177         unsigned int hsub;
178         unsigned int vsub;
179         int ret;
180         int i;
181
182         hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
183         vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
184
185         for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
186                 unsigned int width = mode_cmd->width / (i ? hsub : 1);
187                 unsigned int height = mode_cmd->height / (i ? vsub : 1);
188                 unsigned int min_size;
189
190                 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
191                 if (!obj) {
192                         dev_err(dev->dev, "Failed to lookup GEM object\n");
193                         ret = -ENXIO;
194                         goto err_gem_object_unreference;
195                 }
196
197                 min_size = (height - 1) * mode_cmd->pitches[i]
198                          + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
199                          + mode_cmd->offsets[i];
200
201                 if (obj->size < min_size) {
202                         drm_gem_object_unreference_unlocked(obj);
203                         ret = -EINVAL;
204                         goto err_gem_object_unreference;
205                 }
206                 objs[i] = to_drm_gem_cma_obj(obj);
207         }
208
209         fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
210         if (IS_ERR(fb_cma)) {
211                 ret = PTR_ERR(fb_cma);
212                 goto err_gem_object_unreference;
213         }
214
215         return &fb_cma->fb;
216
217 err_gem_object_unreference:
218         for (i--; i >= 0; i--)
219                 drm_gem_object_unreference_unlocked(&objs[i]->base);
220         return ERR_PTR(ret);
221 }
222 EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
223
224 /**
225  * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
226  *
227  * If your hardware has special alignment or pitch requirements these should be
228  * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
229  * you need to set &drm_framebuffer_funcs ->dirty.
230  */
231 struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
232         struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
233 {
234         return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
235                                             &drm_fb_cma_funcs);
236 }
237 EXPORT_SYMBOL_GPL(drm_fb_cma_create);
238
239 /**
240  * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
241  * @fb: The framebuffer
242  * @plane: Which plane
243  *
244  * Return the CMA GEM object for given framebuffer.
245  *
246  * This function will usually be called from the CRTC callback functions.
247  */
248 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
249         unsigned int plane)
250 {
251         struct drm_fb_cma *fb_cma = to_fb_cma(fb);
252
253         if (plane >= 4)
254                 return NULL;
255
256         return fb_cma->obj[plane];
257 }
258 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
259
260 #ifdef CONFIG_DEBUG_FS
261 /*
262  * drm_fb_cma_describe() - Helper to dump information about a single
263  * CMA framebuffer object
264  */
265 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
266 {
267         struct drm_fb_cma *fb_cma = to_fb_cma(fb);
268         int i, n = drm_format_num_planes(fb->pixel_format);
269
270         seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
271                         (char *)&fb->pixel_format);
272
273         for (i = 0; i < n; i++) {
274                 seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
275                                 i, fb->offsets[i], fb->pitches[i]);
276                 drm_gem_cma_describe(fb_cma->obj[i], m);
277         }
278 }
279
280 /**
281  * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
282  * in debugfs.
283  */
284 int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
285 {
286         struct drm_info_node *node = (struct drm_info_node *) m->private;
287         struct drm_device *dev = node->minor->dev;
288         struct drm_framebuffer *fb;
289
290         mutex_lock(&dev->mode_config.fb_lock);
291         drm_for_each_fb(fb, dev)
292                 drm_fb_cma_describe(fb, m);
293         mutex_unlock(&dev->mode_config.fb_lock);
294
295         return 0;
296 }
297 EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
298 #endif
299
/* fbdev ops: everything delegates to the generic sys_* / helper functions.
 * drm_fbdev_cma_defio_init() copies this and overrides fb_mmap when
 * deferred io is in use.
 */
static struct fb_ops drm_fbdev_cma_ops = {
	.owner		= THIS_MODULE,
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};
311
/*
 * fb_mmap replacement used when deferred io is enabled: let the deferred io
 * core set up the mapping, then switch the mapping to write-combine.
 * The prot change must come after fb_deferred_io_mmap() so it is not
 * overwritten.
 */
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
					  struct vm_area_struct *vma)
{
	fb_deferred_io_mmap(info, vma);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
320
321 static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
322                                     struct drm_gem_cma_object *cma_obj)
323 {
324         struct fb_deferred_io *fbdefio;
325         struct fb_ops *fbops;
326
327         /*
328          * Per device structures are needed because:
329          * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
330          * fbdefio: individual delays
331          */
332         fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
333         fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
334         if (!fbdefio || !fbops) {
335                 kfree(fbdefio);
336                 return -ENOMEM;
337         }
338
339         /* can't be offset from vaddr since dirty() uses cma_obj */
340         fbi->screen_buffer = cma_obj->vaddr;
341         /* fb_deferred_io_fault() needs a physical address */
342         fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
343
344         *fbops = *fbi->fbops;
345         fbi->fbops = fbops;
346
347         fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
348         fbdefio->deferred_io = drm_fb_helper_deferred_io;
349         fbi->fbdefio = fbdefio;
350         fb_deferred_io_init(fbi);
351         fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
352
353         return 0;
354 }
355
356 static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
357 {
358         if (!fbi->fbdefio)
359                 return;
360
361         fb_deferred_io_cleanup(fbi);
362         kfree(fbi->fbdefio);
363         kfree(fbi->fbops);
364 }
365
/*
 * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
 * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
 *
 * Allocates a CMA buffer sized for the requested surface, wraps it in a
 * drm_fb_cma framebuffer and wires up the fb_info for fbdev emulation.
 * Returns 0 on success or a negative error code.
 */
int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
	struct drm_fb_helper_surface_size *sizes,
	const struct drm_framebuffer_funcs *funcs)
{
	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_device *dev = helper->dev;
	struct drm_gem_cma_object *obj;
	struct drm_framebuffer *fb;
	unsigned int bytes_per_pixel;
	unsigned long offset;
	struct fb_info *fbi;
	size_t size;
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	/* Round up so bpp values that are not a multiple of 8 still fit. */
	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
		sizes->surface_depth);

	/* Single contiguous buffer backs the whole single-plane framebuffer. */
	size = mode_cmd.pitches[0] * mode_cmd.height;
	obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(obj))
		return -ENOMEM;

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		ret = PTR_ERR(fbi);
		goto err_gem_free_object;
	}

	/* Ownership of the obj reference transfers to fbdev_cma->fb here;
	 * drm_fb_cma_destroy() will drop it.
	 */
	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
	if (IS_ERR(fbdev_cma->fb)) {
		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
		ret = PTR_ERR(fbdev_cma->fb);
		goto err_fb_info_destroy;
	}

	fb = &fbdev_cma->fb->fb;
	helper->fb = fb;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &drm_fbdev_cma_ops;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);

	/* Point the fbdev memory window at the panned start of the buffer. */
	offset = fbi->var.xoffset * bytes_per_pixel;
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
	fbi->screen_base = obj->vaddr + offset;
	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	/* A dirty() callback implies the driver wants fbdev deferred io. */
	if (funcs->dirty) {
		ret = drm_fbdev_cma_defio_init(fbi, obj);
		if (ret)
			goto err_cma_destroy;
	}

	return 0;

err_cma_destroy:
	/* NOTE(review): drm_fb_cma_destroy() already unreferences obj (the
	 * reference was transferred via drm_fb_cma_alloc()), yet this path
	 * falls through to err_gem_free_object and drops a second reference
	 * — verify the refcounting on this rarely-hit path.
	 */
	drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
	drm_fb_cma_destroy(&fbdev_cma->fb->fb);
err_fb_info_destroy:
	drm_fb_helper_release_fbi(helper);
err_gem_free_object:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}
EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
452
453 static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
454         struct drm_fb_helper_surface_size *sizes)
455 {
456         return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
457 }
458
/* Helper funcs used by drm_fbdev_cma_init() (the no-custom-funcs path). */
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
	.fb_probe = drm_fbdev_cma_create,
};
462
463 /**
464  * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
465  * @dev: DRM device
466  * @preferred_bpp: Preferred bits per pixel for the device
467  * @num_crtc: Number of CRTCs
468  * @max_conn_count: Maximum number of connectors
469  * @funcs: fb helper functions, in particular fb_probe()
470  *
471  * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
472  */
473 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
474         unsigned int preferred_bpp, unsigned int num_crtc,
475         unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
476 {
477         struct drm_fbdev_cma *fbdev_cma;
478         struct drm_fb_helper *helper;
479         int ret;
480
481         fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
482         if (!fbdev_cma) {
483                 dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
484                 return ERR_PTR(-ENOMEM);
485         }
486
487         helper = &fbdev_cma->fb_helper;
488
489         drm_fb_helper_prepare(dev, helper, funcs);
490
491         ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
492         if (ret < 0) {
493                 dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
494                 goto err_free;
495         }
496
497         ret = drm_fb_helper_single_add_all_connectors(helper);
498         if (ret < 0) {
499                 dev_err(dev->dev, "Failed to add connectors.\n");
500                 goto err_drm_fb_helper_fini;
501
502         }
503
504         ret = drm_fb_helper_initial_config(helper, preferred_bpp);
505         if (ret < 0) {
506                 dev_err(dev->dev, "Failed to set initial hw configuration.\n");
507                 goto err_drm_fb_helper_fini;
508         }
509
510         return fbdev_cma;
511
512 err_drm_fb_helper_fini:
513         drm_fb_helper_fini(helper);
514 err_free:
515         kfree(fbdev_cma);
516
517         return ERR_PTR(ret);
518 }
519 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
520
521 /**
522  * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
523  * @dev: DRM device
524  * @preferred_bpp: Preferred bits per pixel for the device
525  * @num_crtc: Number of CRTCs
526  * @max_conn_count: Maximum number of connectors
527  *
528  * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
529  */
530 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
531         unsigned int preferred_bpp, unsigned int num_crtc,
532         unsigned int max_conn_count)
533 {
534         return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
535                                 max_conn_count, &drm_fb_cma_helper_funcs);
536 }
537 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
538
/**
 * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
 * @fbdev_cma: The drm_fbdev_cma struct
 *
 * Tears down fbdev emulation in reverse order of creation: unregister the
 * fb_info from userspace first, stop deferred io, release the fb_info,
 * destroy the framebuffer and finally free the helper state.
 */
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
	/* NOTE(review): fb_helper.fbdev is NULL if fb_probe() never ran;
	 * drm_fbdev_cma_defio_fini() dereferences it — verify this path.
	 */
	drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);

	/* fb is NULL if fb_probe() never ran or failed before allocating it */
	if (fbdev_cma->fb) {
		drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
	}

	drm_fb_helper_fini(&fbdev_cma->fb_helper);
	kfree(fbdev_cma);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
558
559 /**
560  * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
561  * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
562  *
563  * This function is usually called from the DRM drivers lastclose callback.
564  */
565 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
566 {
567         if (fbdev_cma)
568                 drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
569 }
570 EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
571
/**
 * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 *
 * This function is usually called from the DRM drivers output_poll_changed
 * callback.
 */
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
{
	if (fbdev_cma)
		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);