#include "drmP.h"
#include "drm.h"
+#include <linux/completion.h>
+#include <linux/kds.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
+/*
+ * Debug-logging helpers: return the GEM object's global name, or -1 when
+ * the object pointer is NULL.  Arguments are fully parenthesized so the
+ * macros expand safely for any pointer expression.
+ */
+#define gem_get_name(o) ((o) ? (o)->name : -1)
+#define exynos_gem_get_name(o) ((o) ? (o)->base.name : -1)
+
+#ifdef CONFIG_DRM_EXYNOS_DEBUG
+/*
+ * Account a newly backed GEM object: bump the per-device object count
+ * and add its size to the running total in dev_priv->mm.
+ */
+static void exynos_gem_info_add_obj(struct drm_gem_object *obj)
+{
+	struct exynos_drm_private *dev_priv = obj->dev->dev_private;
+
+	atomic_inc(&dev_priv->mm.object_count);
+	atomic_add(obj->size, &dev_priv->mm.object_memory);
+}
+
+/* Undo the accounting done by exynos_gem_info_add_obj(). */
+static void exynos_gem_info_remove_obj(struct drm_gem_object *obj)
+{
+	struct exynos_drm_private *dev_priv = obj->dev->dev_private;
+
+	atomic_dec(&dev_priv->mm.object_count);
+	atomic_sub(obj->size, &dev_priv->mm.object_memory);
+}
+#else
+/* Debug accounting compiled out: no-op stubs keep callers unconditional. */
+static void exynos_gem_info_add_obj(struct drm_gem_object *obj)
+{
+}
+
+static void exynos_gem_info_remove_obj(struct drm_gem_object *obj)
+{
+}
+#endif
+
static unsigned int convert_to_vm_err_msg(int msg)
{
unsigned int out_msg;
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
-	struct inode *inode;
-	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;
-	/* This is the shared memory object that backs the GEM resource */
-	inode = obj->filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
-
	npages = obj->size >> PAGE_SHIFT;
	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);
-	gfpmask |= mapping_gfp_mask(mapping);
-
	for (i = 0; i < npages; i++) {
-		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-		if (IS_ERR(p))
+		p = alloc_page(gfpmask);
+		if (!p)
			goto fail;
		pages[i] = p;
	}
+	exynos_gem_info_add_obj(obj);
+
	return pages;
fail:
	while (i--)
-		page_cache_release(pages[i]);
+		__free_page(pages[i]);
	drm_free_large(pages);
-	return ERR_PTR(PTR_ERR(p));
+	/*
+	 * alloc_page() returns NULL (not an ERR_PTR) on failure, so
+	 * PTR_ERR(p) would be 0 here and IS_ERR() callers would treat
+	 * the NULL result as success.  Return a real error code.
+	 */
+	return ERR_PTR(-ENOMEM);
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
-					struct page **pages,
-					bool dirty, bool accessed)
+					struct page **pages)
{
	int i, npages;
-	npages = obj->size >> PAGE_SHIFT;
+	/* Drop the debug accounting taken in exynos_gem_get_pages(). */
+	exynos_gem_info_remove_obj(obj);
-	for (i = 0; i < npages; i++) {
-		if (dirty)
-			set_page_dirty(pages[i]);
-
-		if (accessed)
-			mark_page_accessed(pages[i]);
+	npages = obj->size >> PAGE_SHIFT;
-	/* Undo the reference we took when populating the table */
-		page_cache_release(pages[i]);
-	}
+	/*
+	 * Pages now come from alloc_page() instead of shmem, so there is
+	 * no page-cache reference or dirty/accessed state to maintain --
+	 * each page is simply freed.
+	 */
+	for (i = 0; i < npages; i++)
+		__free_page(pages[i]);
	drm_free_large(pages);
}
sgl = sg_next(sgl);
}
- /* Map the SGT to create a IOMMU mapping for this buffer */
- ret = dma_map_sg(obj->dev->dev, buf->sgt->sgl, buf->sgt->orig_nents, DMA_BIDIRECTIONAL);
- if (!ret) {
- DRM_ERROR("failed to map sg\n");
- ret = -ENOMEM;
- goto err1;
- }
- buf->dma_addr = buf->sgt->sgl->dma_address;
-
/* add some codes for UNCACHED type here. TODO */
buf->pages = pages;
kfree(buf->sgt);
buf->sgt = NULL;
err:
- exynos_gem_put_pages(obj, pages, true, false);
+ exynos_gem_put_pages(obj, pages);
return ret;
}
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- /* Unmap the SGT to remove the IOMMU mapping created for this buffer */
- dma_unmap_sg(obj->dev->dev, buf->sgt->sgl, buf->sgt->orig_nents, DMA_BIDIRECTIONAL);
-
/*
* if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
* allocated at gem fault handler.
kfree(buf->sgt);
buf->sgt = NULL;
- exynos_gem_put_pages(obj, buf->pages, true, false);
+ exynos_gem_put_pages(obj, buf->pages);
buf->pages = NULL;
/* add some codes for UNCACHED type here. TODO */
{
struct drm_gem_object *obj;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[GEM:%d]\n", exynos_gem_get_name(exynos_gem_obj));
if (!exynos_gem_obj)
return;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+ if (exynos_gem_obj->resource_set != NULL) {
+ /* kds_resource_set_release NULLs the pointer */
+ kds_resource_set_release(&exynos_gem_obj->resource_set);
+ }
+
if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
exynos_gem_obj->buffer->pages)
exynos_drm_gem_put_pages(obj);
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
- exynos_gem_obj = NULL;
}
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
struct drm_gem_object *obj;
int ret;
+ DRM_DEBUG_KMS("[DEV:%s] size: %lu\n", dev->devname, size);
+
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
DRM_ERROR("failed to allocate exynos gem object\n");
return NULL;
}
- exynos_gem_obj->size = size;
obj = &exynos_gem_obj->base;
ret = drm_gem_object_init(dev, obj, size);
struct exynos_drm_gem_buf *buf;
int ret;
+ DRM_DEBUG_KMS("[DEV:%s] flags: 0x%x size: %lu\n", dev->devname, flags,
+ size);
+
if (!size) {
DRM_ERROR("invalid size.\n");
return ERR_PTR(-EINVAL);
}
size = roundup_gem_size(size, flags);
- DRM_DEBUG_KMS("%s\n", __FILE__);
ret = check_gem_flags(flags);
if (ret)
* allocate all pages as desired size if user wants to allocate
* physically non-continuous memory.
*/
- if (flags & EXYNOS_BO_NONCONTIG) {
+ if (flags & EXYNOS_BO_NONCONTIG)
ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
+ else
ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[DEV:%s] flags: 0x%x size: %llu\n", dev->devname,
+ args->flags, args->size);
exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
if (IS_ERR(exynos_gem_obj))
return 0;
}
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return ERR_PTR(-EINVAL);
- }
-
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
- return &exynos_gem_obj->buffer->dma_addr;
-}
-
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return;
- }
-
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
-
- /* TODO */
- return;
- }
-
- drm_gem_object_unreference_unlocked(obj);
-
- /*
- * decrease obj->refcount one more time because we has already
- * increased it at exynos_drm_gem_get_dma_addr().
- */
- drm_gem_object_unreference_unlocked(obj);
-}
-
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_gem_map_off *args = data;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
- args->handle, (unsigned long)args->offset);
+ DRM_DEBUG_KMS("[DEV:%s] handle: %u, offset: 0x%llx\n",
+ dev->devname, args->handle, args->offset);
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[GEM:%d] vma->pgoff: 0x%lx vma: 0x%lx -> 0x%lx\n",
+ gem_get_name(obj), vma->vm_pgoff, vma->vm_start,
+ vma->vm_end);
vma->vm_flags |= (VM_IO | VM_RESERVED);
struct drm_gem_object *obj;
unsigned int addr;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[DEV:%s] handle: %u size: %u\n", dev->devname,
+ args->handle, args->size);
if (!(dev->driver->driver_features & DRIVER_GEM)) {
DRM_ERROR("does not support GEM.\n");
return 0;
}
-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
+/*
+ * KDS wait callback: param1 is the completion to signal once the
+ * resource set has been granted; param2 is unused.
+ */
+static void cpu_acquire_kds_cb_fn(void *param1, void *param2)
{
-	DRM_DEBUG_KMS("%s\n", __FILE__);
+	/* No cast needed from void * in C; '*' binds to the variable. */
+	struct completion *completion = param1;
+
+	complete(completion);
+}
+
+/*
+ * exynos_drm_gem_cpu_acquire_ioctl - prepare a GEM buffer for CPU access.
+ *
+ * When the buffer is exported via dma-buf, waits (interruptibly) for its
+ * KDS resource, then records the acquisition on the file's
+ * gem_cpu_acquire_list so exynos_drm_gem_cpu_release_ioctl() can match
+ * it.  The GEM reference from the lookup is kept for the lifetime of the
+ * acquisition and dropped at release time.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int exynos_drm_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_exynos_gem_cpu_acquire *args = data;
+	struct exynos_drm_file_private *file_priv = file->driver_priv;
+	struct drm_gem_object *obj;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+	struct kds_resource *kds;
+	struct kds_resource_set *rset;
+	unsigned long exclusive;
+	struct kds_callback callback;
+	DECLARE_COMPLETION_ONSTACK(completion);
+#endif
+	struct exynos_drm_gem_obj_node *gem_node;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("[DEV:%s] handle: %u flags: 0x%x\n", dev->devname,
+			args->handle, args->flags);
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (!(dev->driver->driver_features & DRIVER_GEM)) {
+		DRM_ERROR("does not support GEM.\n");
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+	if (exynos_gem_obj->base.export_dma_buf == NULL) {
+		/* If there is no dmabuf present, there is no cross-process/
+		 * cross-device sharing and sync is unnecessary.
+		 */
+		ret = 0;
+		goto unref_obj;
+	}
+	exclusive = 0;
+	if ((args->flags & DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE) != 0)
+		exclusive = 1;
+	kds = &exynos_gem_obj->base.export_dma_buf->kds;
+	/* cpu_acquire_kds_cb_fn completes 'completion' when kds is granted. */
+	kds_callback_init(&callback, 1, &cpu_acquire_kds_cb_fn);
+	ret = kds_async_waitall(&rset, KDS_FLAG_LOCKED_WAIT, &callback,
+		&completion, NULL, 1, &exclusive, &kds);
+	/*
+	 * NOTE(review): struct_mutex is dropped across the sleep --
+	 * presumably so other ioctls can run while we wait for the current
+	 * KDS owner to release the buffer; confirm no state read above is
+	 * re-read unsafely after re-lock.
+	 */
+	mutex_unlock(&dev->struct_mutex);
+
+	if (!IS_ERR_VALUE(ret))
+		ret = wait_for_completion_interruptible(&completion);
+	kds_callback_term(&callback);
+
+	mutex_lock(&dev->struct_mutex);
+	if (IS_ERR_VALUE(ret))
+		goto release_rset;
+#endif
+
+	gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+	if (!gem_node) {
+		DRM_ERROR("failed to allocate eyxnos_drm_gem_obj_node.\n");
+		ret = -ENOMEM;
+		goto release_rset;
+	}
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+	exynos_gem_obj->resource_set = rset;
+#endif
+
+	/* Success: keep the lookup reference; dropped in release ioctl. */
+	gem_node->exynos_gem_obj = exynos_gem_obj;
+	list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
+	mutex_unlock(&dev->struct_mutex);
	return 0;
+
+
+release_rset:
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+	kds_resource_set_release_sync(&rset);
+#endif
+
+unref_obj:
+	drm_gem_object_unreference(obj);
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/*
+ * exynos_drm_gem_cpu_release_ioctl - end CPU access started by
+ * exynos_drm_gem_cpu_acquire_ioctl().
+ *
+ * Releases the KDS resource set (when KDS is in use), removes the
+ * matching entry from the file's gem_cpu_acquire_list and drops the GEM
+ * reference that was kept at acquire time.
+ *
+ * Returns 0 on success, -EINVAL if the handle is bad or was not
+ * previously acquired by this file, -ENODEV without GEM support.
+ */
+int exynos_drm_gem_cpu_release_ioctl(struct drm_device *dev, void* data,
+				  struct drm_file *file)
+{
+	struct drm_exynos_gem_cpu_release *args = data;
+	struct exynos_drm_file_private *file_priv = file->driver_priv;
+	struct drm_gem_object *obj;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct list_head *cur;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("[DEV:%s] handle: %u\n", dev->devname, args->handle);
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (!(dev->driver->driver_features & DRIVER_GEM)) {
+		DRM_ERROR("does not support GEM.\n");
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+	if (exynos_gem_obj->base.export_dma_buf == NULL) {
+		/* If there is no dmabuf present, there is no cross-process/
+		 * cross-device sharing and sync is unnecessary.
+		 */
+		ret = 0;
+		goto unref_obj;
+	}
+#endif
+
+	/* Find the entry recorded at acquire time for this object. */
+	list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
+		struct exynos_drm_gem_obj_node *node = list_entry(
+				cur, struct exynos_drm_gem_obj_node, list);
+		if (node->exynos_gem_obj == exynos_gem_obj)
+			break;
+	}
+	if (cur == &file_priv->gem_cpu_acquire_list) {
+		/* Loop ran to completion: no matching acquire entry. */
+		DRM_ERROR("gem object not acquired for current process.\n");
+		ret = -EINVAL;
+		goto unref_obj;
+	}
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+	/* kds_resource_set_release NULLs the pointer */
+	BUG_ON(exynos_gem_obj->resource_set == NULL);
+	kds_resource_set_release(&exynos_gem_obj->resource_set);
+#endif
+
+	list_del(cur);
+	kfree(list_entry(cur, struct exynos_drm_gem_obj_node, list));
+	/* unreference for the reference held since cpu_acquire_ioctl */
+	drm_gem_object_unreference(obj);
+	ret = 0;
+
+unref_obj:
+	/* unreference for the reference from drm_gem_object_lookup() */
+	drm_gem_object_unreference(obj);
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buf;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[GEM:%d]\n", gem_get_name(obj));
exynos_gem_obj = to_exynos_gem_obj(obj);
buf = exynos_gem_obj->buffer;
struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[DEV:%s] %ux%u bpp: %u flags: 0x%x\n", dev->devname,
+ args->height, args->width, args->bpp, args->flags);
+
+ if (!(args->flags & EXYNOS_BO_NONCONTIG)) {
+ DRM_ERROR("contig buffer allocation not supported.\n");
+ /*
+ * HACK: Currently we do not support CONTIG buffer
+ * allocation from user space. The drm framework
+ * supports non-contig buffers only. In the next versions
+ * the option to choose contig/non-contig buffers itself
+ * is not supported through this flag. For now, we just
+ * return error.
+ */
+ return -EINVAL;
+ }
/*
* alocate memory to be used for framebuffer.
*/
args->pitch = args->width * ALIGN(args->bpp, 8) >> 3;
+ args->pitch = round_up(args->pitch, 64);
args->size = PAGE_ALIGN(args->pitch * args->height);
struct drm_gem_object *obj;
int ret = 0;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[DEV:%s] handle: %u\n", dev->devname, handle);
mutex_lock(&dev->struct_mutex);
{
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("[DEV:%s] handle: %u\n", dev->devname, handle);
/*
* obj->refcount and obj->handle_count are decreased and
pgoff_t page_offset;
int ret;
+ DRM_DEBUG_DRIVER("vma->pgoff: 0x%lx vmf->pgoff: 0x%lx vmf->vaddr: 0x%p\n",
+ vma->vm_pgoff, (unsigned long)vmf->pgoff,
+ vmf->virtual_address);
+
page_offset = ((unsigned long)vmf->virtual_address -
vma->vm_start) >> PAGE_SHIFT;
f_vaddr = (unsigned long)vmf->virtual_address;
{
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ DRM_DEBUG_KMS("pgoff: 0x%lx\n", vma->vm_pgoff);
/* set vm_area_struct. */
ret = drm_gem_mmap(filp, vma);