CHROMIUM: dma-buf/kds: allow KDS to be compiled out if dma-buf is enabled
[cascardo/linux.git] / drivers / base / dma-buf.c
index 20258e1..4defa66 100644 (file)
 #include <linux/dma-buf.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+#include <linux/poll.h>
+#include <linux/sched.h>
+#endif
 
 static inline int is_dma_buf_file(struct file *);
 
@@ -40,6 +44,10 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        dmabuf = file->private_data;
 
        dmabuf->ops->release(dmabuf);
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       kds_callback_term(&dmabuf->kds_cb);
+       kds_resource_term(&dmabuf->kds);
+#endif
        kfree(dmabuf);
        return 0;
 }
@@ -61,9 +69,95 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
        return dmabuf->ops->mmap(dmabuf, vma);
 }
 
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+/*
+ * KDS availability callback, invoked once the async wait registered in
+ * dma_buf_poll() is granted.
+ *
+ * param1: kmalloc'd pointer to the resource set handle (allocated in
+ *         dma_buf_poll(), freed here - this is the end of its ownership).
+ * param2: wait queue to wake (wq_exclusive or wq_shared of the dma_buf).
+ *
+ * poll() only reports availability, it must not keep holding the
+ * resource, so the set is released immediately before waking pollers.
+ * The set pointer is copied to a local before kfree() so the release
+ * does not touch freed memory.
+ */
+static void dma_buf_kds_cb_fn(void *param1, void *param2)
+{
+       struct kds_resource_set **rset_ptr = param1;
+       struct kds_resource_set *rset = *rset_ptr;
+       wait_queue_head_t *wait_queue = param2;
+
+       kfree(rset_ptr);
+       kds_resource_set_release(&rset);
+       wake_up(wait_queue);
+}
+
+/*
+ * Probe a KDS resource for availability without blocking.
+ *
+ * @kds:       resource to test
+ * @exclusive: non-zero to test for exclusive (write) access,
+ *             0 for shared (read) access
+ * @poll_ret:  poll event bits are stored here when a definitive
+ *             answer is reached
+ *
+ * Returns non-zero when the probe is conclusive (resource available, or
+ * an error occurred) and *poll_ret has been set; returns 0 when the
+ * resource is currently busy, allowing the caller to retry with a
+ * weaker access mode.
+ */
+static int dma_buf_kds_check(struct kds_resource *kds,
+               unsigned long exclusive, int *poll_ret)
+{
+       /* Synchronous wait with 0 timeout - poll availability */
+       struct kds_resource_set *rset = kds_waitall(1, &exclusive, &kds, 0);
+
+       if (IS_ERR(rset)) {
+               /* Report the failure through the event mask; returning
+                * without setting *poll_ret would make poll() claim
+                * "no events" and silently swallow the error. */
+               *poll_ret = POLLERR;
+               return 1;
+       }
+
+       if (rset) {
+               kds_resource_set_release(&rset);
+               *poll_ret = POLLIN | POLLRDNORM;
+               /* Exclusive availability implies writability too */
+               if (exclusive)
+                       *poll_ret |= POLLOUT | POLLWRNORM;
+               return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * poll() backed by KDS resource ownership.
+ *
+ * Non-blocking callers (poll_does_not_wait()) get an immediate probe
+ * via zero-timeout synchronous waits, trying exclusive access first
+ * since it is a superset of shared access.  Blocking callers register
+ * an asynchronous KDS wait whose callback (dma_buf_kds_cb_fn) frees
+ * the resource-set handle and wakes the chosen wait queue once the
+ * resource becomes available.
+ */
+static unsigned int dma_buf_poll(struct file *file,
+               struct poll_table_struct *wait)
+{
+       struct dma_buf *dmabuf;
+       struct kds_resource *kds;
+       unsigned int ret = 0;
+
+       if (!is_dma_buf_file(file))
+               return POLLERR;
+
+       dmabuf = file->private_data;
+       kds    = &dmabuf->kds;
+
+       if (poll_does_not_wait(wait)) {
+               /* Use a signed local: dma_buf_kds_check() takes int *,
+                * while ret must stay unsigned for the poll return. */
+               int events = 0;
+
+               /* Check for exclusive access (superset of shared) first */
+               if (!dma_buf_kds_check(kds, 1ul, &events))
+                       dma_buf_kds_check(kds, 0ul, &events);
+               ret = events;
+       } else {
+               int events = poll_requested_events(wait);
+               unsigned long exclusive;
+               wait_queue_head_t *wq;
+               int err;
+               struct kds_resource_set **rset_ptr =
+                               kmalloc(sizeof(*rset_ptr), GFP_KERNEL);
+
+               /* POLLERR is the poll event mask; POLL_ERR is an
+                * unrelated SIGPOLL si_code and must not be returned
+                * from a poll handler. */
+               if (!rset_ptr)
+                       return POLLERR;
+
+               if (events & POLLOUT) {
+                       wq = &dmabuf->wq_exclusive;
+                       exclusive = 1;
+               } else {
+                       wq = &dmabuf->wq_shared;
+                       exclusive = 0;
+               }
+               poll_wait(file, wq, wait);
+
+               /* Keep the return code in a signed int: the original
+                * IS_ERR_VALUE() on an unsigned int zero-extends on
+                * 64-bit builds and never matches, missing the error
+                * and leaking rset_ptr. */
+               err = kds_async_waitall(rset_ptr, KDS_FLAG_LOCKED_WAIT,
+                               &dmabuf->kds_cb, rset_ptr, wq, 1, &exclusive,
+                               &kds);
+
+               if (err) {
+                       ret = POLLERR;
+                       kfree(rset_ptr);
+               } else {
+                       /* Can't allow access until callback */
+                       ret = 0;
+               }
+       }
+       return ret;
+}
+#endif
+
 static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       /* poll() reports/awaits KDS resource availability (see dma_buf_poll) */
+       .poll           = dma_buf_poll,
+#endif
 };
 
 /*
@@ -120,6 +214,13 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);
 
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       init_waitqueue_head(&dmabuf->wq_exclusive);
+       init_waitqueue_head(&dmabuf->wq_shared);
+       kds_resource_init(&dmabuf->kds);
+       kds_callback_init(&dmabuf->kds_cb, 1, dma_buf_kds_cb_fn);
+#endif
+
        return dmabuf;
 }
 EXPORT_SYMBOL_GPL(dma_buf_export);
@@ -444,6 +545,9 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
 {
+       struct file *oldfile;
+       int ret;
+
        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;
 
@@ -457,15 +561,21 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                return -EINVAL;
 
        /* readjust the vma */
-       if (vma->vm_file)
-               fput(vma->vm_file);
-
+       get_file(dmabuf->file);
+       oldfile = vma->vm_file;
        vma->vm_file = dmabuf->file;
-       get_file(vma->vm_file);
-
        vma->vm_pgoff = pgoff;
 
-       return dmabuf->ops->mmap(dmabuf, vma);
+       ret = dmabuf->ops->mmap(dmabuf, vma);
+       if (ret) {
+               /* restore old parameters on failure */
+               vma->vm_file = oldfile;
+               fput(dmabuf->file);
+       } else {
+               if (oldfile)
+                       fput(oldfile);
+       }
+       return ret;
 }
 EXPORT_SYMBOL_GPL(dma_buf_mmap);