v4l2: dequeue buffers properly on VIDIOC_STREAMOFF
[cascardo/linux.git] drivers/media/video/videobuf2-core.c
index 2e8f1df..0956bd2 100644
@@ -105,6 +105,36 @@ static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
        }
 }
 
+/**
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
+{
+       if (!p->mem_priv)
+               return;
+
+       if (p->dbuf_mapped)
+               call_memop(q, unmap_dmabuf, p->mem_priv);
+
+       call_memop(q, detach_dmabuf, p->mem_priv);
+       dma_buf_put(p->dbuf);
+       memset(p, 0, sizeof *p);
+}
+
+/**
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+       struct vb2_queue *q = vb->vb2_queue;
+       unsigned int plane;
+
+       for (plane = 0; plane < vb->num_planes; ++plane)
+               __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+}
+
 /**
  * __setup_offsets() - setup unique offsets ("cookies") for every plane in
  * every buffer on the queue
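The two helpers added above tear a DMABUF-backed plane down in a fixed order: unmap the attachment if it was mapped, detach, then drop the dma_buf reference taken at QBUF time. For orientation, a hedged sketch of the dma-buf core calls this sequence corresponds to; the function and parameter names below are illustrative and not part of this patch (the unmap/detach steps live in the allocator's unmap_dmabuf/detach_dmabuf memops, while the dma_buf_put() is done by the core above).

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

/*
 * Illustrative importer-side teardown mirroring __vb2_plane_dmabuf_put():
 * unmap the attachment, detach from the exporter, drop the reference.
 */
static void example_plane_teardown(struct dma_buf *dbuf,
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sgt)
{
        if (sgt)
                dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dbuf, attach);
        dma_buf_put(dbuf);      /* balances the dma_buf_get() done at QBUF time */
}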
@@ -227,6 +257,8 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
                /* Free MMAP buffers or release USERPTR buffers */
                if (q->memory == V4L2_MEMORY_MMAP)
                        __vb2_buf_mem_free(vb);
+               else if (q->memory == V4L2_MEMORY_DMABUF)
+                       __vb2_buf_dmabuf_put(vb);
                else
                        __vb2_buf_userptr_put(vb);
        }
@@ -349,6 +381,12 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
                 */
                memcpy(b->m.planes, vb->v4l2_planes,
                        b->length * sizeof(struct v4l2_plane));
+
+               if (q->memory == V4L2_MEMORY_DMABUF) {
+                       unsigned int plane;
+                       for (plane = 0; plane < vb->num_planes; ++plane)
+                               b->m.planes[plane].m.fd = 0;
+               }
        } else {
                /*
                 * We use length and offset in v4l2_planes array even for
@@ -360,6 +398,8 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
                        b->m.offset = vb->v4l2_planes[0].m.mem_offset;
                else if (q->memory == V4L2_MEMORY_USERPTR)
                        b->m.userptr = vb->v4l2_planes[0].m.userptr;
+               else if (q->memory == V4L2_MEMORY_DMABUF)
+                       b->m.fd = 0;
        }
 
        /*
@@ -450,6 +490,20 @@ static int __verify_mmap_ops(struct vb2_queue *q)
        return 0;
 }
 
+/**
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+       if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+           !q->mem_ops->detach_dmabuf  || !q->mem_ops->map_dmabuf ||
+           !q->mem_ops->unmap_dmabuf)
+               return -EINVAL;
+
+       return 0;
+}
+
 /**
  * vb2_reqbufs() - Initiate streaming
  * @q:         videobuf2 queue
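__verify_dmabuf_ops() requires the allocator behind the queue to provide four new memory operations before a DMABUF queue is accepted. A hedged sketch of how an allocator would advertise them; the callback signatures are inferred from the call_memop() call sites in this patch (the header change adding these fields to struct vb2_mem_ops is in a companion patch not shown here), and the example_* names are placeholders.

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <media/videobuf2-core.h>

/* Stub DMABUF import hooks; a real allocator would attach/map the buffer. */
static void *example_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
                                   unsigned long size, int write)
{
        return ERR_PTR(-ENOSYS);        /* would dma_buf_attach() and save state */
}

static void example_detach_dmabuf(void *buf_priv) { }
static int example_map_dmabuf(void *buf_priv) { return -ENOSYS; }
static void example_unmap_dmabuf(void *buf_priv) { }

static const struct vb2_mem_ops example_mem_ops = {
        .attach_dmabuf  = example_attach_dmabuf,
        .detach_dmabuf  = example_detach_dmabuf,
        .map_dmabuf     = example_map_dmabuf,
        .unmap_dmabuf   = example_unmap_dmabuf,
        /* ...plus the usual alloc/put or get_userptr/put_userptr ops... */
};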
@@ -483,8 +537,9 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
                return -EBUSY;
        }
 
-       if (req->memory != V4L2_MEMORY_MMAP
-                       && req->memory != V4L2_MEMORY_USERPTR) {
+       if (req->memory != V4L2_MEMORY_MMAP &&
+           req->memory != V4L2_MEMORY_DMABUF &&
+           req->memory != V4L2_MEMORY_USERPTR) {
                dprintk(1, "reqbufs: unsupported memory type\n");
                return -EINVAL;
        }
@@ -513,6 +568,11 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
                return -EINVAL;
        }
 
+       if (req->memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+               dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
+               return -EINVAL;
+       }
+
        if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
                /*
                 * We already have buffers allocated, so first check if they
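With the two reqbufs changes above, userspace can now set a queue up for importing buffers by passing V4L2_MEMORY_DMABUF to VIDIOC_REQBUFS (the memory type constant comes from the companion videodev2.h patch). A minimal userspace sketch, assuming an already-opened capture device:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask for 'count' import (DMABUF) buffers on an open V4L2 capture node. */
static int request_dmabuf_bufs(int vfd, unsigned int count)
{
        struct v4l2_requestbuffers req;

        memset(&req, 0, sizeof(req));
        req.count  = count;
        req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_DMABUF;

        /* Rejected with EINVAL if the driver's queue lacks VB2_DMABUF support */
        return ioctl(vfd, VIDIOC_REQBUFS, &req);
}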
@@ -619,8 +679,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
                return -EBUSY;
        }
 
-       if (create->memory != V4L2_MEMORY_MMAP
-                       && create->memory != V4L2_MEMORY_USERPTR) {
+       if (create->memory != V4L2_MEMORY_MMAP &&
+           create->memory != V4L2_MEMORY_USERPTR &&
+           create->memory != V4L2_MEMORY_DMABUF) {
                dprintk(1, "%s(): unsupported memory type\n", __func__);
                return -EINVAL;
        }
@@ -644,6 +705,11 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
                return -EINVAL;
        }
 
+       if (create->memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+               dprintk(1, "%s(): DMABUF for current setup unsupported\n", __func__);
+               return -EINVAL;
+       }
+
        if (q->num_buffers == VIDEO_MAX_FRAME) {
                dprintk(1, "%s(): maximum number of buffers already allocated\n",
                        __func__);
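vb2_create_bufs() gets the same treatment, so VIDIOC_CREATE_BUFS can also add DMABUF buffers. An illustrative userspace call, under the same assumptions as the REQBUFS sketch above:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Add 'count' extra DMABUF buffers using an already-negotiated format. */
static int create_extra_dmabuf_bufs(int vfd, unsigned int count,
                                    const struct v4l2_format *fmt)
{
        struct v4l2_create_buffers create;

        memset(&create, 0, sizeof(create));
        create.count  = count;
        create.memory = V4L2_MEMORY_DMABUF;
        create.format = *fmt;

        return ioctl(vfd, VIDIOC_CREATE_BUFS, &create);
}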
@@ -776,6 +842,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 {
        struct vb2_queue *q = vb->vb2_queue;
        unsigned long flags;
+       unsigned int plane;
 
        if (vb->state != VB2_BUF_STATE_ACTIVE)
                return;
@@ -786,6 +853,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
        dprintk(4, "Done processing on buffer %d, state: %d\n",
                        vb->v4l2_buf.index, vb->state);
 
+       /* sync buffers */
+       for (plane = 0; plane < vb->num_planes; ++plane)
+               call_memop(q, finish, vb->planes[plane].mem_priv);
+
        /* Add the buffer to the done buffers list */
        spin_lock_irqsave(&q->done_lock, flags);
        vb->state = state;
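vb2_buffer_done() now runs the allocator's finish memop on every plane before the buffer is handed back, so CPU-visible data is coherent by the time userspace dequeues it. A hedged sketch of what such a memop typically boils down to for a DMA-mapped buffer; the private-data layout is an assumption, not taken from any particular allocator:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical allocator private data; field names are illustrative. */
struct example_buf {
        struct device *dev;
        struct sg_table *sgt;
        enum dma_data_direction dma_dir;
};

/* "finish": hand the buffer back to the CPU once the device is done. */
static void example_finish(void *buf_priv)
{
        struct example_buf *buf = buf_priv;

        dma_sync_sg_for_cpu(buf->dev, buf->sgt->sgl, buf->sgt->nents,
                            buf->dma_dir);
}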
@@ -839,6 +910,14 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
                                        b->m.planes[plane].length;
                        }
                }
+               if (b->memory == V4L2_MEMORY_DMABUF) {
+                       for (plane = 0; plane < vb->num_planes; ++plane) {
+                               v4l2_planes[plane].bytesused =
+                                       b->m.planes[plane].bytesused;
+                               v4l2_planes[plane].m.fd =
+                                       b->m.planes[plane].m.fd;
+                       }
+               }
        } else {
                /*
                 * Single-planar buffers do not use planes array,
@@ -853,6 +932,10 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
                        v4l2_planes[0].m.userptr = b->m.userptr;
                        v4l2_planes[0].length = b->length;
                }
+
+               if (b->memory == V4L2_MEMORY_DMABUF)
+                       v4l2_planes[0].m.fd = b->m.fd;
+
        }
 
        vb->v4l2_buf.field = b->field;
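On the way in, __fill_vb2_buffer() now copies a dma-buf file descriptor per plane (or b->m.fd for single-planar buffers) from the v4l2_buffer passed to QBUF. A minimal userspace sketch of queueing a multi-planar DMABUF buffer; the fds are assumed to come from some exporter, e.g. another V4L2 device via VIDIOC_EXPBUF or a GPU driver:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Queue buffer 'index', backed by one externally exported dma-buf per plane. */
static int qbuf_dmabuf_mplane(int vfd, unsigned int index,
                              const int *fds, unsigned int num_planes)
{
        struct v4l2_plane planes[VIDEO_MAX_PLANES];
        struct v4l2_buffer buf;
        unsigned int i;

        memset(planes, 0, sizeof(planes));
        for (i = 0; i < num_planes; i++)
                planes[i].m.fd = fds[i];        /* exporter's dma-buf fd */

        memset(&buf, 0, sizeof(buf));
        buf.index    = index;
        buf.type     = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        buf.memory   = V4L2_MEMORY_DMABUF;
        buf.length   = num_planes;
        buf.m.planes = planes;

        return ioctl(vfd, VIDIOC_QBUF, &buf);
}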
@@ -956,15 +1039,115 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
        return __fill_vb2_buffer(vb, b, vb->v4l2_planes);
 }
 
+/**
+ * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ */
+static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+       struct v4l2_plane planes[VIDEO_MAX_PLANES];
+       struct vb2_queue *q = vb->vb2_queue;
+       void *mem_priv;
+       unsigned int plane;
+       int ret;
+       int write = !V4L2_TYPE_IS_OUTPUT(q->type);
+
+       /* Verify and copy relevant information provided by the userspace */
+       ret = __fill_vb2_buffer(vb, b, planes);
+       if (ret)
+               return ret;
+
+       for (plane = 0; plane < vb->num_planes; ++plane) {
+               struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+               if (IS_ERR_OR_NULL(dbuf)) {
+                       dprintk(1, "qbuf: invalid dmabuf fd for "
+                               "plane %d\n", plane);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               /* Skip the plane if already verified */
+               if (dbuf == vb->planes[plane].dbuf) {
+                       planes[plane].length = dbuf->size;
+                       dma_buf_put(dbuf);
+                       continue;
+               }
+
+               dprintk(3, "qbuf: buffer description for plane %d changed, "
+                       "reattaching dma buf\n", plane);
+
+               /* Release previously acquired memory if present */
+               __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
+
+               /* Acquire each plane's memory */
+               mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
+                       dbuf, q->plane_sizes[plane], write);
+               if (IS_ERR(mem_priv)) {
+                       dprintk(1, "qbuf: failed acquiring dmabuf "
+                               "memory for plane %d\n", plane);
+                       ret = PTR_ERR(mem_priv);
+                       goto err;
+               }
+
+               planes[plane].length = dbuf->size;
+               vb->planes[plane].dbuf = dbuf;
+               vb->planes[plane].mem_priv = mem_priv;
+       }
+
+       /* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
+        * really we want to do this just before the DMA, not while queueing
+        * the buffer(s).
+        */
+       for (plane = 0; plane < vb->num_planes; ++plane) {
+               ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
+               if (ret) {
+                       dprintk(1, "qbuf: failed mapping dmabuf "
+                               "memory for plane %d\n", plane);
+                       goto err;
+               }
+               vb->planes[plane].dbuf_mapped = 1;
+       }
+
+       /*
+        * Call driver-specific initialization on the newly acquired buffer,
+        * if provided.
+        */
+       ret = call_qop(q, buf_init, vb);
+       if (ret) {
+               dprintk(1, "qbuf: buffer initialization failed\n");
+               goto err;
+       }
+
+       /*
+        * Now that everything is in order, copy relevant information
+        * provided by userspace.
+        */
+       for (plane = 0; plane < vb->num_planes; ++plane)
+               vb->v4l2_planes[plane] = planes[plane];
+
+       return 0;
+err:
+       /* In case of errors, release planes that were already acquired */
+       __vb2_buf_dmabuf_put(vb);
+
+       return ret;
+}
+
 /**
  * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
  */
 static void __enqueue_in_driver(struct vb2_buffer *vb)
 {
        struct vb2_queue *q = vb->vb2_queue;
+       unsigned int plane;
 
        vb->state = VB2_BUF_STATE_ACTIVE;
        atomic_inc(&q->queued_count);
+
+       /* sync buffers */
+       for (plane = 0; plane < vb->num_planes; ++plane)
+               call_memop(q, prepare, vb->planes[plane].mem_priv);
+
        q->ops->buf_queue(vb);
 }
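As the TODO in __qbuf_dmabuf() notes, queueing currently pins the buffer through the allocator's map_dmabuf memop, and the prepare call added just above in __enqueue_in_driver() is the mirror image of finish (typically a dma_sync_sg_for_device()). A hedged sketch fleshing out the example_map_dmabuf stub from earlier, showing the dma-buf core call such a memop is expected to wrap; the private-data fields are assumptions:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical allocator private data; field names are illustrative. */
struct example_dmabuf_buf {
        struct dma_buf_attachment *db_attach;
        struct sg_table *dma_sgt;
        enum dma_data_direction dma_dir;
};

/* "map_dmabuf": pin the exporter's backing storage so DMA can start. */
static int example_map_dmabuf(void *buf_priv)
{
        struct example_dmabuf_buf *buf = buf_priv;
        struct sg_table *sgt;

        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        buf->dma_sgt = sgt;
        return 0;
}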
 
@@ -980,6 +1163,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
        case V4L2_MEMORY_USERPTR:
                ret = __qbuf_userptr(vb, b);
                break;
+       case V4L2_MEMORY_DMABUF:
+               ret = __qbuf_dmabuf(vb, b);
+               break;
        default:
                WARN(1, "Invalid queue type\n");
                ret = -EINVAL;
@@ -1287,6 +1473,35 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q)
 }
 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
 
+/**
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+       struct vb2_queue *q = vb->vb2_queue;
+       unsigned int i;
+
+       /* nothing to do if the buffer is already dequeued */
+       if (vb->state == VB2_BUF_STATE_DEQUEUED)
+               return;
+
+       vb->state = VB2_BUF_STATE_DEQUEUED;
+
+       /* Unmap the DMABUF buffer.
+        * TODO: this unpins the buffer (dma_buf_unmap_attachment()), but
+        * really we want to do this just after DMA, not when the
+        * buffer is dequeued.
+        */
+       if (q->memory == V4L2_MEMORY_DMABUF) {
+               for (i = 0; i < vb->num_planes; ++i) {
+                       if (!vb->planes[i].dbuf_mapped)
+                               continue;
+                       call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
+                       vb->planes[i].dbuf_mapped = 0;
+               }
+       }
+}
+
 /**
  * vb2_dqbuf() - Dequeue a buffer to the userspace
  * @q:         videobuf2 queue
@@ -1351,11 +1566,12 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
        __fill_v4l2_buffer(vb, b);
        /* Remove from videobuf queue */
        list_del(&vb->queued_entry);
+       /* go back to dequeued state */
+       __vb2_dqbuf(vb);
 
        dprintk(1, "dqbuf of buffer %d, with state %d\n",
                        vb->v4l2_buf.index, vb->state);
 
-       vb->state = VB2_BUF_STATE_DEQUEUED;
        return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dqbuf);
@@ -1394,7 +1610,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
         * Reinitialize all buffers for next use.
         */
        for (i = 0; i < q->num_buffers; ++i)
-               q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
+               __vb2_dqbuf(q->bufs[i]);
 }
 
 /**
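This is the hunk behind the commit subject: __vb2_queue_cancel() now calls __vb2_dqbuf() instead of just flipping the state field, so buffers still owned by the kernel at VIDIOC_STREAMOFF time are properly returned to the DEQUEUED state (and their DMABUF planes unmapped). In practice that lets userspace stop and restart streaming without another REQBUFS round; an illustrative single-planar sequence:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Stop streaming, requeue every buffer, start again (MMAP queue sketch). */
static int restart_streaming(int vfd, unsigned int num_bufs)
{
        enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        struct v4l2_buffer buf;
        unsigned int i;

        if (ioctl(vfd, VIDIOC_STREAMOFF, &type))
                return -1;

        /* After STREAMOFF all buffers are DEQUEUED again, so QBUF succeeds. */
        for (i = 0; i < num_bufs; i++) {
                memset(&buf, 0, sizeof(buf));
                buf.index  = i;
                buf.type   = type;
                buf.memory = V4L2_MEMORY_MMAP;
                if (ioctl(vfd, VIDIOC_QBUF, &buf))
                        return -1;
        }

        return ioctl(vfd, VIDIOC_STREAMON, &type);
}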
@@ -1498,6 +1714,78 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
 }
 EXPORT_SYMBOL_GPL(vb2_streamoff);
 
+/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q:         videobuf2 queue
+ * @eb:                export buffer structure passed from userspace to vidioc_expbuf
+ *             handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+       struct vb2_buffer *vb = NULL;
+       struct vb2_plane *vb_plane;
+       int ret;
+       struct dma_buf *dbuf;
+
+       if (q->memory != V4L2_MEMORY_MMAP) {
+               dprintk(1, "Queue is not currently set up for mmap\n");
+               return -EINVAL;
+       }
+
+       if (!q->mem_ops->get_dmabuf) {
+               dprintk(1, "Queue does not support DMA buffer exporting\n");
+               return -EINVAL;
+       }
+
+       if (eb->flags & ~O_CLOEXEC) {
+               dprintk(1, "Only the O_CLOEXEC flag is supported\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Find the plane corresponding to the offset passed by userspace.
+        */
+       if (eb->type != q->type) {
+               dprintk(1, "invalid type %u\n", eb->type);
+               return -EINVAL;
+       }
+       if (eb->index >= q->num_buffers) {
+               dprintk(1, "invalid buffer %u\n", eb->index);
+               return -EINVAL;
+       }
+       vb = q->bufs[eb->index];
+       if (eb->plane >= vb->num_planes) {
+               dprintk(1, "invalid plane %u\n", eb->plane);
+               return -EINVAL;
+       }
+       vb_plane = &vb->planes[eb->plane];
+
+       dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+       if (IS_ERR_OR_NULL(dbuf)) {
+               dprintk(1, "Failed to export buffer %d, plane %d\n",
+                       eb->index, eb->plane);
+               return -EINVAL;
+       }
+
+       ret = dma_buf_fd(dbuf, eb->flags);
+       if (ret < 0) {
+               dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
+                       eb->index, eb->plane, ret);
+               dma_buf_put(dbuf);
+               return ret;
+       }
+
+       dprintk(3, "buffer %d, plane %d exported as fd %d\n",
+               eb->index, eb->plane, ret);
+       eb->fd = ret;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
 /**
  * __find_plane_by_offset() - find plane associated with the given offset off
  */
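The vb2_expbuf() helper added above backs the new VIDIOC_EXPBUF ioctl: it exports one plane of an MMAP buffer as a dma-buf file descriptor that other devices can import. A minimal userspace sketch using only the fields this function reads (type, index, plane, flags) and fills in (fd); the ioctl and struct definitions come from the companion videodev2.h patch:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Export plane 'plane' of MMAP buffer 'index' as a dma-buf fd. */
static int expbuf_fd(int vfd, unsigned int index, unsigned int plane)
{
        struct v4l2_exportbuffer eb;

        memset(&eb, 0, sizeof(eb));
        eb.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        eb.index = index;
        eb.plane = plane;
        eb.flags = O_CLOEXEC;   /* the only flag vb2_expbuf() accepts */

        if (ioctl(vfd, VIDIOC_EXPBUF, &eb))
                return -1;

        return eb.fd;   /* pass this fd to the importing device's QBUF */
}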