CHROMIUM: dma-buf/kds: allow KDS to be compiled out if dma-buf is enabled
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 07cbbc6..4defa66 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
 #include <linux/dma-buf.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+#include <linux/poll.h>
+#include <linux/sched.h>
+#endif
 
 static inline int is_dma_buf_file(struct file *);
 
@@ -40,12 +44,120 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        dmabuf = file->private_data;
 
        dmabuf->ops->release(dmabuf);
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       kds_callback_term(&dmabuf->kds_cb);
+       kds_resource_term(&dmabuf->kds);
+#endif
        kfree(dmabuf);
        return 0;
 }
 
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+       struct dma_buf *dmabuf;
+
+       if (!is_dma_buf_file(file))
+               return -EINVAL;
+
+       dmabuf = file->private_data;
+
+       /* check for overflowing the buffer's size */
+       if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+           dmabuf->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       return dmabuf->ops->mmap(dmabuf, vma);
+}
+
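+/*
+ * Example: with dma_buf_mmap_internal() wired up as the fops mmap handler
+ * below, userspace can map an exported buffer straight through its dma-buf
+ * fd.  A minimal sketch (illustrative only; "buf_fd" and "size" are assumed
+ * to come from the exporting driver, error handling omitted):
+ *
+ *     void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *                       MAP_SHARED, buf_fd, 0);
+ *     if (addr != MAP_FAILED) {
+ *             ... access the exporter's backing pages ...
+ *             munmap(addr, size);
+ *     }
+ */
+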
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+static void dma_buf_kds_cb_fn(void *param1, void *param2)
+{
+       struct kds_resource_set **rset_ptr = param1;
+       struct kds_resource_set *rset = *rset_ptr;
+       wait_queue_head_t *wait_queue = param2;
+
+       kfree(rset_ptr);
+       kds_resource_set_release(&rset);
+       wake_up(wait_queue);
+}
+
+static int dma_buf_kds_check(struct kds_resource *kds,
+               unsigned long exclusive, int *poll_ret)
+{
+       /* Synchronous wait with 0 timeout - poll availability */
+       struct kds_resource_set *rset = kds_waitall(1, &exclusive, &kds, 0);
+
+       if (IS_ERR(rset))
+               return POLLERR;
+
+       if (rset) {
+               kds_resource_set_release(&rset);
+               *poll_ret = POLLIN | POLLRDNORM;
+               if (exclusive)
+                       *poll_ret |= POLLOUT | POLLWRNORM;
+               return 1;
+       } else {
+               return 0;
+       }
+}
+
+static unsigned int dma_buf_poll(struct file *file,
+               struct poll_table_struct *wait)
+{
+       struct dma_buf *dmabuf;
+       struct kds_resource *kds;
+       unsigned int ret = 0;
+
+       if (!is_dma_buf_file(file))
+               return POLLERR;
+
+       dmabuf = file->private_data;
+       kds    = &dmabuf->kds;
+
+       if (poll_does_not_wait(wait)) {
+               /* Check for exclusive access (superset of shared) first */
+               if (!dma_buf_kds_check(kds, 1ul, &ret))
+                       dma_buf_kds_check(kds, 0ul, &ret);
+       } else {
+               int events = poll_requested_events(wait);
+               unsigned long exclusive;
+               wait_queue_head_t *wq;
+               struct kds_resource_set **rset_ptr =
+                               kmalloc(sizeof(*rset_ptr), GFP_KERNEL);
+
+               if (!rset_ptr)
+                       return POLLERR;
+
+               if (events & POLLOUT) {
+                       wq = &dmabuf->wq_exclusive;
+                       exclusive = 1;
+               } else {
+                       wq = &dmabuf->wq_shared;
+                       exclusive = 0;
+               }
+               poll_wait(file, wq, wait);
+               ret = kds_async_waitall(rset_ptr, KDS_FLAG_LOCKED_WAIT,
+                               &dmabuf->kds_cb, rset_ptr, wq, 1, &exclusive,
+                               &kds);
+
+               if (IS_ERR_VALUE(ret)) {
+                       ret = POLLERR;
+                       kfree(rset_ptr);
+               } else {
+                       /* Can't allow access until callback */
+                       ret = 0;
+               }
+       }
+       return ret;
+}
+#endif
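+
+/*
+ * Example: with CONFIG_DMA_SHARED_BUFFER_USES_KDS set, userspace can wait
+ * for a buffer to become idle by polling its dma-buf fd.  Illustrative
+ * sketch only; "buf_fd" is assumed to be a dma-buf file descriptor:
+ *
+ *     struct pollfd pfd = { .fd = buf_fd, .events = POLLOUT };
+ *
+ *     poll(&pfd, 1, -1);
+ *
+ * POLLOUT waits for exclusive (write) access; poll for POLLIN instead to
+ * wait for shared (read) access.
+ */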
+
 static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
+       .mmap           = dma_buf_mmap_internal,
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       .poll           = dma_buf_poll,
+#endif
 };
 
 /*
@@ -82,7 +194,8 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
                          || !ops->unmap_dma_buf
                          || !ops->release
                          || !ops->kmap_atomic
-                         || !ops->kmap)) {
+                         || !ops->kmap
+                         || !ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }
 
@@ -101,6 +214,13 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);
 
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+       init_waitqueue_head(&dmabuf->wq_exclusive);
+       init_waitqueue_head(&dmabuf->wq_shared);
+       kds_resource_init(&dmabuf->kds);
+       kds_callback_init(&dmabuf->kds_cb, 1, dma_buf_kds_cb_fn);
+#endif
+
        return dmabuf;
 }
 EXPORT_SYMBOL_GPL(dma_buf_export);
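+
+/*
+ * Example: since dma_buf_export() now rejects a dma_buf_ops without an
+ * mmap handler, every exporter must supply one.  Hypothetical sketch;
+ * "my_dmabuf_mmap", "my_dmabuf_ops" and "obj->pfn" are illustrative
+ * names, not part of this patch:
+ *
+ *     static int my_dmabuf_mmap(struct dma_buf *buf,
+ *                               struct vm_area_struct *vma)
+ *     {
+ *             struct my_obj *obj = buf->priv;
+ *
+ *             return remap_pfn_range(vma, vma->vm_start, obj->pfn,
+ *                                    vma->vm_end - vma->vm_start,
+ *                                    vma->vm_page_prot);
+ *     }
+ *
+ *     static const struct dma_buf_ops my_dmabuf_ops = {
+ *             ...
+ *             .mmap = my_dmabuf_mmap,
+ *     };
+ */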
@@ -406,3 +526,90 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kunmap);
+
+
+/**
+ * dma_buf_mmap - Set up a userspace mmap with the given vma
+ * @dmabuf:    [in]    buffer that should back the vma
+ * @vma:       [in]    vma for the mmap
+ * @pgoff:     [in]    offset in pages where this mmap should start within the
+ *                     dma-buf buffer.
+ *
+ * This function adjusts the passed-in vma so that it points at the file of
+ * the dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function
+ * to set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+                unsigned long pgoff)
+{
+       struct file *oldfile;
+       int ret;
+
+       if (WARN_ON(!dmabuf || !vma))
+               return -EINVAL;
+
+       /* check for offset overflow */
+       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+               return -EOVERFLOW;
+
+       /* check for overflowing the buffer's size */
+       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+           dmabuf->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       /* readjust the vma */
+       get_file(dmabuf->file);
+       oldfile = vma->vm_file;
+       vma->vm_file = dmabuf->file;
+       vma->vm_pgoff = pgoff;
+
+       ret = dmabuf->ops->mmap(dmabuf, vma);
+       if (ret) {
+               /* restore old parameters on failure */
+               vma->vm_file = oldfile;
+               fput(dmabuf->file);
+       } else {
+               if (oldfile)
+                       fput(oldfile);
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);
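+
+/*
+ * Example: an importing driver can hand its own mmap request through to
+ * the exporter with dma_buf_mmap().  Illustrative sketch; "struct my_obj"
+ * holding the imported dma_buf is an assumed driver-private type:
+ *
+ *     static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
+ *     {
+ *             struct my_obj *obj = file->private_data;
+ *
+ *             return dma_buf_mmap(obj->dmabuf, vma, 0);
+ *     }
+ */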
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf:    [in]    buffer to vmap
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. The intended use for them
+ * is to map objects linearly into kernel address space for frequently
+ * accessed objects. Please attempt to use kmap/kunmap before thinking
+ * about these interfaces.
+ */
+void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+       if (WARN_ON(!dmabuf))
+               return NULL;
+
+       if (dmabuf->ops->vmap)
+               return dmabuf->ops->vmap(dmabuf);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_buf_vmap);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf:    [in]    buffer to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+       if (WARN_ON(!dmabuf))
+               return;
+
+       if (dmabuf->ops->vunmap)
+               dmabuf->ops->vunmap(dmabuf, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_vunmap);
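+
+/*
+ * Example: a dma_buf_vmap()/dma_buf_vunmap() pair for short-lived linear
+ * access.  Illustrative only; "dmabuf", "data" and "len" are assumed to
+ * exist in the caller, and the exporter may not implement vmap at all:
+ *
+ *     void *vaddr = dma_buf_vmap(dmabuf);
+ *
+ *     if (vaddr) {
+ *             memcpy(vaddr, data, len);
+ *             dma_buf_vunmap(dmabuf, vaddr);
+ *     }
+ */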