drm: Protect dev->filelist with its own mutex
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d20c2a8..a087b96 100644
@@ -26,6 +26,7 @@
  *          Jerome Glisse
  */
 #include <linux/ktime.h>
+#include <linux/pagemap.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -83,24 +84,32 @@ retry:
                return r;
        }
        *obj = &robj->gem_base;
-       robj->pid = task_pid_nr(current);
-
-       mutex_lock(&adev->gem.mutex);
-       list_add_tail(&robj->list, &adev->gem.objects);
-       mutex_unlock(&adev->gem.mutex);
 
        return 0;
 }
 
-int amdgpu_gem_init(struct amdgpu_device *adev)
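+/*
+ * Forcibly release GEM handles that user space clients still hold, e.g.
+ * when the driver is torn down, so the underlying BOs are not leaked.
+ */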
+void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-       INIT_LIST_HEAD(&adev->gem.objects);
-       return 0;
-}
+       struct drm_device *ddev = adev->ddev;
+       struct drm_file *file;
 
-void amdgpu_gem_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_force_delete(adev);
+       mutex_lock(&ddev->filelist_mutex);
+
+       list_for_each_entry(file, &ddev->filelist, lhead) {
+               struct drm_gem_object *gobj;
+               int handle;
+
+               WARN_ONCE(1, "Still active user space clients!\n");
+               spin_lock(&file->table_lock);
+               idr_for_each_entry(&file->object_idr, gobj, handle) {
+                       WARN_ONCE(1, "And also active allocations!\n");
+                       drm_gem_object_unreference_unlocked(gobj);
+               }
+               idr_destroy(&file->object_idr);
+               spin_unlock(&file->table_lock);
+       }
+
+       mutex_unlock(&ddev->filelist_mutex);
 }
 
 /*
@@ -132,25 +141,40 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
 {
-       struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
-       struct amdgpu_device *adev = rbo->adev;
+       struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+       struct amdgpu_device *adev = bo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
+
+       struct amdgpu_bo_list_entry vm_pd;
+       struct list_head list, duplicates;
+       struct ttm_validate_buffer tv;
+       struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
        int r;
-       r = amdgpu_bo_reserve(rbo, true);
+
+       INIT_LIST_HEAD(&list);
+       INIT_LIST_HEAD(&duplicates);
+
+       tv.bo = &bo->tbo;
+       tv.shared = true;
+       list_add(&tv.head, &list);
+
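+       /* the VM page directory must be reserved as well before the
+        * mapping can be removed */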
+       amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
        }
-       bo_va = amdgpu_vm_bo_find(vm, rbo);
+       bo_va = amdgpu_vm_bo_find(vm, bo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
-       amdgpu_bo_unreserve(rbo);
+       ttm_eu_backoff_reservation(&ticket, &list);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -235,12 +259,10 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;
 
-       if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
-            !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-            !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
+       if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
+            !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
 
-               /* if we want to write to it we must require anonymous
-                  memory and install a MMU notifier */
+               /* if we want to write to it we must install a MMU notifier */
                return -EACCES;
        }
 
@@ -252,6 +274,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                goto handle_lockup;
 
        bo = gem_to_amdgpu_bo(gobj);
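+       /* userptr BOs are backed by pages of the owning process and can
+        * therefore only ever be placed in the GTT domain */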
+       bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+       bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;
@@ -264,18 +288,23 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
+
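+               /* grab the user pages while mmap_sem keeps the mapping
+                * from changing underneath us */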
+               r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
+                                                bo->tbo.ttm->pages);
+               if (r)
+                       goto unlock_mmap_sem;
+
                r = amdgpu_bo_reserve(bo, true);
-               if (r) {
-                       up_read(&current->mm->mmap_sem);
-                       goto release_object;
-               }
+               if (r)
+                       goto free_pages;
 
                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
-               up_read(&current->mm->mmap_sem);
                if (r)
-                       goto release_object;
+                       goto free_pages;
+
+               up_read(&current->mm->mmap_sem);
        }
 
        r = drm_gem_handle_create(filp, gobj, &handle);
@@ -287,6 +316,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
        args->handle = handle;
        return 0;
 
+free_pages:
+       release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+
+unlock_mmap_sem:
+       up_read(&current->mm->mmap_sem);
+
 release_object:
        drm_gem_object_unreference_unlocked(gobj);
 
@@ -308,7 +343,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
-       if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
+       if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
@@ -559,11 +594,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        tv.shared = true;
        list_add(&tv.head, &list);
 
-       if (args->operation == AMDGPU_VA_OP_MAP) {
-               tv_pd.bo = &fpriv->vm.page_directory->tbo;
-               tv_pd.shared = true;
-               list_add(&tv_pd.head, &list);
-       }
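+       /* both mapping and unmapping modify the page tables, so the
+        * page directory must be reserved for every VA operation */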
+       tv_pd.bo = &fpriv->vm.page_directory->tbo;
+       tv_pd.shared = true;
+       list_add(&tv_pd.head, &list);
+
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
@@ -629,7 +663,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 
                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
-               info.domains = robj->initial_domain;
+               info.domains = robj->prefered_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
@@ -637,14 +671,18 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
-               if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
+               if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
-               robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
-                                                     AMDGPU_GEM_DOMAIN_GTT |
-                                                     AMDGPU_GEM_DOMAIN_CPU);
+               robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+                                                       AMDGPU_GEM_DOMAIN_GTT |
+                                                       AMDGPU_GEM_DOMAIN_CPU);
+               robj->allowed_domains = robj->prefered_domains;
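+               /* always allow GTT as a fallback placement when only
+                * VRAM was requested */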
+               if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+                       robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+
                amdgpu_bo_unreserve(robj);
                break;
        default:
@@ -689,38 +727,73 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 }
 
 #if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+{
+       struct drm_gem_object *gobj = ptr;
+       struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+       struct seq_file *m = data;
+
+       unsigned domain;
+       const char *placement;
+       unsigned pin_count;
+
+       domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+       switch (domain) {
+       case AMDGPU_GEM_DOMAIN_VRAM:
+               placement = "VRAM";
+               break;
+       case AMDGPU_GEM_DOMAIN_GTT:
+               placement = " GTT";
+               break;
+       case AMDGPU_GEM_DOMAIN_CPU:
+       default:
+               placement = " CPU";
+               break;
+       }
+       seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
+                  id, amdgpu_bo_size(bo), placement,
+                  amdgpu_bo_gpu_offset(bo));
+
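+       /* the BO is not reserved, so this pin count is only a snapshot */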
+       pin_count = ACCESS_ONCE(bo->pin_count);
+       if (pin_count)
+               seq_printf(m, " pin count %d", pin_count);
+       seq_printf(m, "\n");
+
+       return 0;
+}
+
 static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_bo *rbo;
-       unsigned i = 0;
+       struct drm_file *file;
+       int r;
+
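+       /* walking dev->filelist requires holding filelist_mutex */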
+       r = mutex_lock_interruptible(&dev->filelist_mutex);
+       if (r)
+               return r;
 
-       mutex_lock(&adev->gem.mutex);
-       list_for_each_entry(rbo, &adev->gem.objects, list) {
-               unsigned domain;
-               const char *placement;
+       list_for_each_entry(file, &dev->filelist, lhead) {
+               struct task_struct *task;
 
-               domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
-               switch (domain) {
-               case AMDGPU_GEM_DOMAIN_VRAM:
-                       placement = "VRAM";
-                       break;
-               case AMDGPU_GEM_DOMAIN_GTT:
-                       placement = " GTT";
-                       break;
-               case AMDGPU_GEM_DOMAIN_CPU:
-               default:
-                       placement = " CPU";
-                       break;
-               }
-               seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
-                          i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
-                          placement, (unsigned long)rbo->pid);
-               i++;
+               /*
+                * Although we have a valid reference on file->pid, that does
+                * not guarantee that the task_struct that called get_pid() is
+                * still alive (e.g. get_pid(current) => fork() => exit()).
+                * Therefore, we need to protect this ->comm access using RCU.
+                */
+               rcu_read_lock();
+               task = pid_task(file->pid, PIDTYPE_PID);
+               seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+                          task ? task->comm : "<unknown>");
+               rcu_read_unlock();
+
+               spin_lock(&file->table_lock);
+               idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
+               spin_unlock(&file->table_lock);
        }
-       mutex_unlock(&adev->gem.mutex);
+
+       mutex_unlock(&dev->filelist_mutex);
        return 0;
 }