/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)
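
/*
 * For illustration only (not part of the API): with DST_QUEUE_OFF_BASE at
 * 1 GiB (1 << 30), a source buffer that videobuf reports at offset 0x1000
 * is passed through unchanged, while a destination buffer at the same
 * per-queue offset is reported to userspace as 0x40001000. v4l2_m2m_mmap()
 * and v4l2_m2m_expbuf() subtract the base again before handing the request
 * back to videobuf.
 */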

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        struct v4l2_m2m_ops     *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;

        return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                  enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
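
/*
 * Example: a driver's device_run() callback typically peeks at the next
 * ready source and destination buffers before programming the hardware.
 * A minimal sketch using the v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf()
 * wrappers; the mydrv_* names are hypothetical:
 *
 *      static void mydrv_device_run(void *priv)
 *      {
 *              struct mydrv_ctx *ctx = priv;
 *              struct vb2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *              dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *              mydrv_hw_start(ctx, src, dst);
 *      }
 */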

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (NULL != m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
                                       struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 * @m2m_ctx: m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction; a hypothetical job_ready sketch is given
 * after this function.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

        if (m2m_dev->m2m_ops->job_ready
            && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}
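
/*
 * Example: an instance that needs two source buffers per transaction could
 * supply a job_ready callback like the hypothetical sketch below, so that
 * v4l2_m2m_try_schedule() only queues the job once both are available:
 *
 *      static int mydrv_job_ready(void *priv)
 *      {
 *              struct mydrv_ctx *ctx = priv;
 *
 *              return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
 *      }
 */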

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
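
/*
 * Example: v4l2_m2m_job_finish() is typically called from the interrupt
 * handler, after the finished buffers have been returned to videobuf. A
 * minimal sketch with hypothetical mydrv_* names; dev->m2m_dev is assumed
 * to come from v4l2_m2m_init():
 *
 *      static irqreturn_t mydrv_irq(int irq, void *data)
 *      {
 *              struct mydrv_dev *dev = data;
 *              struct mydrv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *              struct vb2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *              dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *              vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *              vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *              v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *              return IRQ_HANDLED;
 *      }
 */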

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_expbuf() - EXPBUF multiplexer
 *
 * EXPBUF requests carry no queue type, so the queue is inferred from the
 * buffer's mmap offset: offsets below DST_QUEUE_OFF_BASE belong to the
 * source (OUTPUT) queue, the rest to the destination (CAPTURE) queue.
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        if (eb->mem_offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
        } else {
                vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
                eb->mem_offset -= DST_QUEUE_OFF_BASE;
        }

        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
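
/*
 * Example: drivers normally route their VIDIOC_QBUF handler straight here.
 * A hypothetical sketch, assuming fh2ctx() maps the v4l2 file handle to the
 * driver's context:
 *
 *      static int mydrv_qbuf(struct file *file, void *fh,
 *                            struct v4l2_buffer *buf)
 *      {
 *              return v4l2_m2m_qbuf(file, fh2ctx(fh)->m2m_ctx, buf);
 *      }
 */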

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        INIT_LIST_HEAD(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for m2m source and destination queues
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue. A hypothetical file-operations
 * sketch is given after v4l2_m2m_mmap() below.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
            && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc = POLLERR;
                goto end;
        }

        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

        poll_wait(file, &src_q->done_wq, wait);
        poll_wait(file, &dst_q->done_wq, wait);

        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                       || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                          done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                       || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside
 * videobuf by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
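
/*
 * Example: both v4l2_m2m_poll() and v4l2_m2m_mmap() are meant to be called
 * from the driver's file operations. A hypothetical sketch, assuming the
 * driver context lives in file->private_data:
 *
 *      static unsigned int mydrv_poll(struct file *file,
 *                                     struct poll_table_struct *wait)
 *      {
 *              struct mydrv_ctx *ctx = file->private_data;
 *
 *              return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *      }
 *
 *      static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct mydrv_ctx *ctx = file->private_data;
 *
 *              return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
 *      }
 */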

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops)
                return ERR_PTR(-EINVAL);

        BUG_ON(!m2m_ops->device_run);
        BUG_ON(!m2m_ops->job_abort);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
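
/*
 * Example: probe() registers the scheduling callbacks once per device. A
 * minimal hypothetical sketch:
 *
 *      static struct v4l2_m2m_ops mydrv_m2m_ops = {
 *              .device_run     = mydrv_device_run,
 *              .job_ready      = mydrv_job_ready,
 *              .job_abort      = mydrv_job_abort,
 *      };
 *
 *      dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
 *      if (IS_ERR(dev->m2m_dev))
 *              return PTR_ERR(dev->m2m_dev);
 */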

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function to
 * be used for initializing vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
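
/*
 * Example: the queue_init callback fills in both vb2_queue structs (type,
 * io_modes, ops, mem_ops, drv_priv and a buf_struct_size of at least
 * sizeof(struct v4l2_m2m_buffer), since v4l2_m2m_buf_queue() relies on that
 * layout) and calls vb2_queue_init() on each. A hypothetical call-site
 * sketch for open():
 *
 *      ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, mydrv_queue_init);
 *      if (IS_ERR(ctx->m2m_ctx)) {
 *              ret = PTR_ERR(ctx->m2m_ctx);
 *              goto err_free;
 *      }
 */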

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 *
 * Call from the vb2_ops buf_queue() callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
        struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
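
/*
 * Example: a driver's vb2 buf_queue op usually just forwards here. A
 * hypothetical sketch:
 *
 *      static void mydrv_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *      }
 */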