drivers/gpu/drm/udl/udl_fb.c
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"

#include <drm/drm_fb_helper.h>

#define DL_DEFIO_WRITE_DELAY    5 /* fb_deferred_io.delay in jiffies */

static int fb_defio = 1;  /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;

module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);

struct udl_fbdev {
	struct drm_fb_helper helper;
	struct udl_framebuffer ufb;
	struct list_head fbdev_list;
	int fb_count;
};

#define DL_ALIGN_UP(x, a) ALIGN(x, a)
#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)

/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)

/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)

/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)

/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)

/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)

/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)

#if 0
static uint8_t rgb8(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return DLO_RGB8(red, grn, blu);
}

static uint16_t rgb16(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
}
#endif

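/*
 * fb_deferred_io callback: walk the list of pages touched through the
 * deferred-io mmap and hand each page-sized span of the shadow buffer to
 * udl_render_hline(), which packs device commands into URBs for transfer.
 * Transfer statistics are accumulated in the udl_device counters.
 */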
/*
 * NOTE: fb_defio.c is holding info->fbdefio.mutex
 *   Touching ANY framebuffer memory that triggers a page fault
 *   in fb_defio will cause a deadlock, when it also tries to
 *   grab the same mutex.
 */
static void udlfb_dpy_deferred_io(struct fb_info *info,
				  struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct udl_fbdev *ufbdev = info->par;
	struct drm_device *dev = ufbdev->ufb.base.dev;
	struct udl_device *udl = dev->dev_private;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;

	if (!fb_defio)
		return;

	start_cycles = get_cycles();

	urb = udl_get_urb(dev);
	if (!urb)
		return;

	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {

		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
				     &urb, (char *) info->fix.smem_start,
				     &cmd, cur->index << PAGE_SHIFT,
				     PAGE_SIZE, &bytes_identical, &bytes_sent))
			goto error;
		bytes_rendered += PAGE_SIZE;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(bytes_rendered, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);
}

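/*
 * udl_handle_damage - send a dirty rectangle of the framebuffer to the device
 *
 * The damaged region is widened so that it starts and ends on an
 * unsigned-long boundary (DL_ALIGN_DOWN/DL_ALIGN_UP), then each scanline in
 * the region is rendered through udl_render_hline() and the resulting
 * command stream is submitted over USB. Returns 0, or -EINVAL for a
 * rectangle that falls outside the framebuffer.
 */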
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
		      int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct udl_device *udl = dev->dev_private;
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;
	int bpp = (fb->base.bits_per_pixel / 8);

	if (!fb->active_16)
		return 0;

	if (!fb->obj->vmapping) {
		ret = udl_gem_vmap(fb->obj);
		if (ret == -ENOMEM) {
			DRM_ERROR("failed to vmap fb\n");
			return 0;
		}
		if (!fb->obj->vmapping) {
			DRM_ERROR("failed to get fb vmapping\n");
			return 0;
		}
	}

	start_cycles = get_cycles();

	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height))
		return -EINVAL;

	urb = udl_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i < y + height ; i++) {
		const int line_offset = fb->base.pitches[0] * i;
		const int byte_offset = line_offset + (x * bpp);

		if (udl_render_hline(dev, bpp, &urb,
				     (char *) fb->obj->vmapping,
				     &cmd, byte_offset, width * bpp,
				     &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		ret = udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(width*height*bpp, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);

	return 0;
}

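/*
 * fb_mmap: the framebuffer memory is vmalloc'ed, so map it into userspace
 * one page at a time with vmalloc_to_pfn() + remap_pfn_range().
 */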
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	if (offset + size > info->fix.smem_len)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
		  pos, size);

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	vma->vm_flags |= VM_RESERVED;	/* avoid swapping out this VMA */
	return 0;
}

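/*
 * The fillrect/copyarea/imageblit entry points draw into the shadow buffer
 * with the generic sys_* helpers and then push the affected rectangle to the
 * device via udl_handle_damage().
 */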
static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct udl_fbdev *ufbdev = info->par;

	sys_fillrect(info, rect);

	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
			  rect->height);
}

static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct udl_fbdev *ufbdev = info->par;

	sys_copyarea(info, region);

	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
			  region->height);
}

static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct udl_fbdev *ufbdev = info->par;

	sys_imageblit(info, image);

	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
			  image->height);
}

/*
 * It's common for several clients to have the framebuffer open simultaneously,
 * e.g. both fbcon and X. Makes things interesting.
 * Assumes caller is holding info->lock (for open and release at least)
 */
static int udl_fb_open(struct fb_info *info, int user)
{
	struct udl_fbdev *ufbdev = info->par;
	struct drm_device *dev = ufbdev->ufb.base.dev;
	struct udl_device *udl = dev->dev_private;

	/* If the USB device is gone, we don't accept new opens */
	if (drm_device_is_unplugged(udl->ddev))
		return -ENODEV;

	ufbdev->fb_count++;

	if (fb_defio && (info->fbdefio == NULL)) {
		/* enable defio at last moment if not disabled by client */

		struct fb_deferred_io *fbdefio;

		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

		/* only hook up deferred io if the allocation succeeded */
		if (fbdefio) {
			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
			fbdefio->deferred_io = udlfb_dpy_deferred_io;
			info->fbdefio = fbdefio;
			fb_deferred_io_init(info);
		}
	}

	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
		  info->node, user, info, ufbdev->fb_count);

	return 0;
}


/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int udl_fb_release(struct fb_info *info, int user)
{
	struct udl_fbdev *ufbdev = info->par;

	ufbdev->fb_count--;

	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = udl_fb_mmap;
	}

	pr_warn("released /dev/fb%d user=%d count=%d\n",
		info->node, user, ufbdev->fb_count);

	return 0;
}

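/* fbdev ops: drm_fb_helper defaults plus the damage-tracking wrappers above */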
static struct fb_ops udlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = udl_fb_fillrect,
	.fb_copyarea = udl_fb_copyarea,
	.fb_imageblit = udl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
	.fb_mmap = udl_fb_mmap,
	.fb_open = udl_fb_open,
	.fb_release = udl_fb_release,
};

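/*
 * The fb helper gamma hooks are intentionally empty: the driver does not
 * program a gamma table, and gamma_get simply reports zeroes.
 */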
void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			   u16 blue, int regno)
{
}

void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			   u16 *blue, int regno)
{
	*red = 0;
	*green = 0;
	*blue = 0;
}

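/*
 * drm_framebuffer .dirty hook (DRM_IOCTL_MODE_DIRTYFB): flush each
 * user-supplied clip rectangle of an active framebuffer to the device.
 */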
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
				      struct drm_file *file,
				      unsigned flags, unsigned color,
				      struct drm_clip_rect *clips,
				      unsigned num_clips)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);
	int i;

	if (!ufb->active_16)
		return 0;

	for (i = 0; i < num_clips; i++) {
		udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
				  clips[i].x2 - clips[i].x1,
				  clips[i].y2 - clips[i].y1);
	}
	return 0;
}

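/* Drop the GEM backing object reference and free the framebuffer wrapper. */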
static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);

	if (ufb->obj)
		drm_gem_object_unreference_unlocked(&ufb->obj->base);

	drm_framebuffer_cleanup(fb);
	kfree(ufb);
}

static const struct drm_framebuffer_funcs udlfb_funcs = {
	.destroy = udl_user_framebuffer_destroy,
	.dirty = udl_user_framebuffer_dirty,
	.create_handle = NULL,
};


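/* Bind a GEM object to a udl_framebuffer and register it with the DRM core. */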
static int
udl_framebuffer_init(struct drm_device *dev,
		     struct udl_framebuffer *ufb,
		     struct drm_mode_fb_cmd2 *mode_cmd,
		     struct udl_gem_object *obj)
{
	int ret;

	ufb->obj = obj;
	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
	return ret;
}


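/*
 * Allocate the fbdev emulation framebuffer: a page-aligned GEM object large
 * enough for the requested mode, a vmap of it for CPU access, and an fb_info
 * wired up to the drm_fb_helper and udlfb_ops.
 */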
static int udlfb_create(struct udl_fbdev *ufbdev,
			struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ufbdev->helper.dev;
	struct fb_info *info;
	struct device *device = &dev->usbdev->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct udl_gem_object *obj;
	uint32_t size;
	int ret = 0;

	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (!obj) {
		ret = -ENOMEM;
		goto out;
	}

	ret = udl_gem_vmap(obj);
	if (ret) {
		DRM_ERROR("failed to vmap fb\n");
		goto out_gfree;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_gfree;
	}
	info->par = ufbdev;

	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
	if (ret)
		goto out_gfree;

	fb = &ufbdev->ufb.base;

	ufbdev->helper.fb = fb;
	ufbdev->helper.fbdev = info;

	strcpy(info->fix.id, "udldrmfb");

	info->screen_base = ufbdev->ufb.obj->vmapping;
	info->fix.smem_len = size;
	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &udlfb_ops;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_gfree;
	}

	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
		      fb->width, fb->height,
		      ufbdev->ufb.obj->vmapping);

	return ret;
out_gfree:
	drm_gem_object_unreference(&obj->base);
out:
	return ret;
}

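/*
 * drm_fb_helper .fb_probe hook: create the single fbdev framebuffer on first
 * use. Returns 1 when a new fb was created, 0 if one already exists, or a
 * negative error code.
 */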
static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
					struct drm_fb_helper_surface_size *sizes)
{
	struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
	int new_fb = 0;
	int ret;

	if (!helper->fb) {
		ret = udlfb_create(ufbdev, sizes);
		if (ret)
			return ret;

		new_fb = 1;
	}
	return new_fb;
}

static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
	.gamma_set = udl_crtc_fb_gamma_set,
	.gamma_get = udl_crtc_fb_gamma_get,
	.fb_probe = udl_fb_find_or_create_single,
};

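/*
 * Tear down the fbdev emulation: unregister and release the fb_info, then
 * clean up the fb helper, the framebuffer and its GEM backing object.
 */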
static void udl_fbdev_destroy(struct drm_device *dev,
			      struct udl_fbdev *ufbdev)
{
	struct fb_info *info;
	if (ufbdev->helper.fbdev) {
		info = ufbdev->helper.fbdev;
		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}
	drm_fb_helper_fini(&ufbdev->helper);
	drm_framebuffer_cleanup(&ufbdev->ufb.base);
	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}

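/*
 * Set up fbdev emulation at driver load: a single CRTC/connector pair, with
 * the initial depth taken from the fb_bpp module parameter.
 */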
int udl_fbdev_init(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int bpp_sel = fb_bpp;
	struct udl_fbdev *ufbdev;
	int ret;

	ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
	if (!ufbdev)
		return -ENOMEM;

	udl->fbdev = ufbdev;
	ufbdev->helper.funcs = &udl_fb_helper_funcs;

	ret = drm_fb_helper_init(dev, &ufbdev->helper,
				 1, 1);
	if (ret) {
		kfree(ufbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
	drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
	return 0;
}

void udl_fbdev_cleanup(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	if (!udl->fbdev)
		return;

	udl_fbdev_destroy(dev, udl->fbdev);
	kfree(udl->fbdev);
	udl->fbdev = NULL;
}

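/* Called on USB disconnect: unlink the registered fb_info from the now-gone device. */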
void udl_fbdev_unplug(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	struct udl_fbdev *ufbdev;
	if (!udl->fbdev)
		return;

	ufbdev = udl->fbdev;
	if (ufbdev->helper.fbdev) {
		struct fb_info *info;
		info = ufbdev->helper.fbdev;
		unlink_framebuffer(info);
	}
}

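/*
 * Create a framebuffer from a userspace-supplied GEM handle (the ADDFB2
 * ioctl path), after checking that the object is large enough for the
 * requested pitch and height.
 */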
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
		      struct drm_file *file,
		      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct udl_framebuffer *ufb;
	int ret;
	uint32_t size;

	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	size = mode_cmd->pitches[0] * mode_cmd->height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > obj->size) {
		DRM_ERROR("object size not sufficient for fb %u %zu %u %u\n",
			  size, obj->size, mode_cmd->pitches[0],
			  mode_cmd->height);
		return ERR_PTR(-ENOMEM);
	}

	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
	if (ufb == NULL)
		return ERR_PTR(-ENOMEM);

	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
	if (ret) {
		kfree(ufb);
		return ERR_PTR(-EINVAL);
	}
	return &ufb->base;
}