2 * Copyright © 2008-2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_vgpu.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35 #include "intel_mocs.h"
36 #include <linux/shmem_fs.h>
37 #include <linux/slab.h>
38 #include <linux/swap.h>
39 #include <linux/pci.h>
40 #include <linux/dma-buf.h>
42 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
43 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
45 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
47 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
49 static bool cpu_cache_is_coherent(struct drm_device *dev,
50 enum i915_cache_level level)
52 return HAS_LLC(dev) || level != I915_CACHE_NONE;
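/* Note on the helper below: a CPU write needs a manual clflush only
 * when the object is not already in the CPU write domain and either
 * its cache level is not coherent for CPU access (no LLC and
 * I915_CACHE_NONE) or the pages are pinned for display, where scanout
 * reads bypass the CPU caches.
 */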
55 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
57 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
60 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
63 return obj->pin_display;
67 insert_mappable_node(struct drm_i915_private *i915,
68 struct drm_mm_node *node, u32 size)
70 memset(node, 0, sizeof(*node));
71 return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
73 i915->ggtt.mappable_end,
74 DRM_MM_SEARCH_DEFAULT,
75 DRM_MM_CREATE_DEFAULT);
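/* insert_mappable_node() reserves a small scratch slot in the mappable
 * part of the global GTT; the pread/pwrite fallback paths below use a
 * single-page node like this to map objects one page at a time when
 * the whole object cannot be pinned into the mappable aperture.
 */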
79 remove_mappable_node(struct drm_mm_node *node)
81 drm_mm_remove_node(node);
84 /* some bookkeeping */
85 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
88 spin_lock(&dev_priv->mm.object_stat_lock);
89 dev_priv->mm.object_count++;
90 dev_priv->mm.object_memory += size;
91 spin_unlock(&dev_priv->mm.object_stat_lock);
94 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
97 spin_lock(&dev_priv->mm.object_stat_lock);
98 dev_priv->mm.object_count--;
99 dev_priv->mm.object_memory -= size;
100 spin_unlock(&dev_priv->mm.object_stat_lock);
104 i915_gem_wait_for_error(struct i915_gpu_error *error)
108 if (!i915_reset_in_progress(error))
112 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
113 * userspace. If it takes that long, something really bad is going on and
114 * we should simply try to bail out and fail as gracefully as possible.
116 ret = wait_event_interruptible_timeout(error->reset_queue,
117 !i915_reset_in_progress(error),
120 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
122 } else if (ret < 0) {
129 int i915_mutex_lock_interruptible(struct drm_device *dev)
131 struct drm_i915_private *dev_priv = dev->dev_private;
134 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
138 ret = mutex_lock_interruptible(&dev->struct_mutex);
142 WARN_ON(i915_verify_lists(dev));
147 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
148 struct drm_file *file)
150 struct drm_i915_private *dev_priv = to_i915(dev);
151 struct i915_ggtt *ggtt = &dev_priv->ggtt;
152 struct drm_i915_gem_get_aperture *args = data;
153 struct i915_vma *vma;
157 mutex_lock(&dev->struct_mutex);
158 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
160 pinned += vma->node.size;
161 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
163 pinned += vma->node.size;
164 mutex_unlock(&dev->struct_mutex);
166 args->aper_size = ggtt->base.total;
167 args->aper_available_size = args->aper_size - pinned;
173 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
175 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
176 char *vaddr = obj->phys_handle->vaddr;
178 struct scatterlist *sg;
181 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
184 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
188 page = shmem_read_mapping_page(mapping, i);
190 return PTR_ERR(page);
192 src = kmap_atomic(page);
193 memcpy(vaddr, src, PAGE_SIZE);
194 drm_clflush_virt_range(vaddr, PAGE_SIZE);
201 i915_gem_chipset_flush(to_i915(obj->base.dev));
203 st = kmalloc(sizeof(*st), GFP_KERNEL);
207 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
214 sg->length = obj->base.size;
216 sg_dma_address(sg) = obj->phys_handle->busaddr;
217 sg_dma_len(sg) = obj->base.size;
224 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
228 BUG_ON(obj->madv == __I915_MADV_PURGED);
230 ret = i915_gem_object_set_to_cpu_domain(obj, true);
232 /* In the event of a disaster, abandon all caches and
235 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
238 if (obj->madv == I915_MADV_DONTNEED)
242 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
243 char *vaddr = obj->phys_handle->vaddr;
246 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
250 page = shmem_read_mapping_page(mapping, i);
254 dst = kmap_atomic(page);
255 drm_clflush_virt_range(vaddr, PAGE_SIZE);
256 memcpy(dst, vaddr, PAGE_SIZE);
259 set_page_dirty(page);
260 if (obj->madv == I915_MADV_WILLNEED)
261 mark_page_accessed(page);
268 sg_free_table(obj->pages);
273 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
275 drm_pci_free(obj->base.dev, obj->phys_handle);
278 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
279 .get_pages = i915_gem_object_get_pages_phys,
280 .put_pages = i915_gem_object_put_pages_phys,
281 .release = i915_gem_object_release_phys,
285 drop_pages(struct drm_i915_gem_object *obj)
287 struct i915_vma *vma, *next;
290 drm_gem_object_reference(&obj->base);
291 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
292 if (i915_vma_unbind(vma))
295 ret = i915_gem_object_put_pages(obj);
296 drm_gem_object_unreference(&obj->base);
302 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
305 drm_dma_handle_t *phys;
308 if (obj->phys_handle) {
309 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
315 if (obj->madv != I915_MADV_WILLNEED)
318 if (obj->base.filp == NULL)
321 ret = drop_pages(obj);
325 /* create a new object */
326 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
330 obj->phys_handle = phys;
331 obj->ops = &i915_gem_phys_ops;
333 return i915_gem_object_get_pages(obj);
337 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
338 struct drm_i915_gem_pwrite *args,
339 struct drm_file *file_priv)
341 struct drm_device *dev = obj->base.dev;
342 void *vaddr = obj->phys_handle->vaddr + args->offset;
343 char __user *user_data = u64_to_user_ptr(args->data_ptr);
346 /* We manually control the domain here and pretend that it
347 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
349 ret = i915_gem_object_wait_rendering(obj, false);
353 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
354 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
355 unsigned long unwritten;
357 /* The physical object once assigned is fixed for the lifetime
358 * of the obj, so we can safely drop the lock and continue
361 mutex_unlock(&dev->struct_mutex);
362 unwritten = copy_from_user(vaddr, user_data, args->size);
363 mutex_lock(&dev->struct_mutex);
370 drm_clflush_virt_range(vaddr, args->size);
371 i915_gem_chipset_flush(to_i915(dev));
374 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
378 void *i915_gem_object_alloc(struct drm_device *dev)
380 struct drm_i915_private *dev_priv = dev->dev_private;
381 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
384 void i915_gem_object_free(struct drm_i915_gem_object *obj)
386 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
387 kmem_cache_free(dev_priv->objects, obj);
391 i915_gem_create(struct drm_file *file,
392 struct drm_device *dev,
396 struct drm_i915_gem_object *obj;
400 size = roundup(size, PAGE_SIZE);
404 /* Allocate the new object */
405 obj = i915_gem_object_create(dev, size);
409 ret = drm_gem_handle_create(file, &obj->base, &handle);
410 /* drop reference from allocate - handle holds it now */
411 drm_gem_object_unreference_unlocked(&obj->base);
420 i915_gem_dumb_create(struct drm_file *file,
421 struct drm_device *dev,
422 struct drm_mode_create_dumb *args)
424 /* have to work out size/pitch and return them */
425 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
426 args->size = args->pitch * args->height;
427 return i915_gem_create(file, dev,
428 args->size, &args->handle);
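/* Worked example of the sizing above: a 1920x1080 XRGB8888 dumb buffer
 * (bpp = 32) gets pitch = ALIGN(1920 * 4, 64) = 7680 bytes (already a
 * multiple of 64) and size = 7680 * 1080 = 8294400 bytes, which
 * i915_gem_create() then rounds up to whole pages.
 */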
432 * Creates a new mm object and returns a handle to it.
433 * @dev: drm device pointer
434 * @data: ioctl data blob
435 * @file: drm file pointer
438 i915_gem_create_ioctl(struct drm_device *dev, void *data,
439 struct drm_file *file)
441 struct drm_i915_gem_create *args = data;
443 return i915_gem_create(file, dev,
444 args->size, &args->handle);
448 __copy_to_user_swizzled(char __user *cpu_vaddr,
449 const char *gpu_vaddr, int gpu_offset,
452 int ret, cpu_offset = 0;
455 int cacheline_end = ALIGN(gpu_offset + 1, 64);
456 int this_length = min(cacheline_end - gpu_offset, length);
457 int swizzled_gpu_offset = gpu_offset ^ 64;
459 ret = __copy_to_user(cpu_vaddr + cpu_offset,
460 gpu_vaddr + swizzled_gpu_offset,
465 cpu_offset += this_length;
466 gpu_offset += this_length;
467 length -= this_length;
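/* The swizzled copy above never crosses a 64-byte boundary in a single
 * step: cacheline_end clamps each chunk to one cacheline, and XORing
 * the GPU offset with 64 swaps the two 64-byte halves of every
 * 128-byte span, e.g. GPU bytes 0..63 are fetched from 64..127 and
 * vice versa, compensating for the hardware's bit-17-based swizzling.
 */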
474 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
475 const char __user *cpu_vaddr,
478 int ret, cpu_offset = 0;
481 int cacheline_end = ALIGN(gpu_offset + 1, 64);
482 int this_length = min(cacheline_end - gpu_offset, length);
483 int swizzled_gpu_offset = gpu_offset ^ 64;
485 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
486 cpu_vaddr + cpu_offset,
491 cpu_offset += this_length;
492 gpu_offset += this_length;
493 length -= this_length;
500 * Pins the specified object's pages and synchronizes the object with
501 * GPU accesses. Sets needs_clflush to non-zero if the caller should
502 * flush the object from the CPU cache.
504 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
511 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
514 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
515 /* If we're not in the cpu read domain, set ourself into the gtt
516 * read domain and manually flush cachelines (if required). This
517 * optimizes for the case when the gpu will dirty the data
518 * anyway again before the next pread happens. */
519 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
521 ret = i915_gem_object_wait_rendering(obj, true);
526 ret = i915_gem_object_get_pages(obj);
530 i915_gem_object_pin_pages(obj);
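/* A sketch of the expected caller pattern (i915_gem_shmem_pread()
 * below follows it):
 *
 *	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *	if (ret)
 *		return ret;
 *	... copy out of obj->pages, clflushing first if needs_clflush ...
 *	i915_gem_object_unpin_pages(obj);
 */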
535 /* Per-page copy function for the shmem pread fastpath.
536 * Flushes invalid cachelines before reading the target if
537 * needs_clflush is set. */
539 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
540 char __user *user_data,
541 bool page_do_bit17_swizzling, bool needs_clflush)
546 if (unlikely(page_do_bit17_swizzling))
549 vaddr = kmap_atomic(page);
551 drm_clflush_virt_range(vaddr + shmem_page_offset,
553 ret = __copy_to_user_inatomic(user_data,
554 vaddr + shmem_page_offset,
556 kunmap_atomic(vaddr);
558 return ret ? -EFAULT : 0;
562 shmem_clflush_swizzled_range(char *addr, unsigned long length,
565 if (unlikely(swizzled)) {
566 unsigned long start = (unsigned long) addr;
567 unsigned long end = (unsigned long) addr + length;
569 /* For swizzling simply ensure that we always flush both
570 * channels. Lame, but simple and it works. Swizzled
571 * pwrite/pread is far from a hotpath - current userspace
572 * doesn't use it at all. */
573 start = round_down(start, 128);
574 end = round_up(end, 128);
576 drm_clflush_virt_range((void *)start, end - start);
578 drm_clflush_virt_range(addr, length);
583 /* Only difference to the fast-path function is that this can handle bit17
584 * and uses non-atomic copy and kmap functions. */
586 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
587 char __user *user_data,
588 bool page_do_bit17_swizzling, bool needs_clflush)
595 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
597 page_do_bit17_swizzling);
599 if (page_do_bit17_swizzling)
600 ret = __copy_to_user_swizzled(user_data,
601 vaddr, shmem_page_offset,
604 ret = __copy_to_user(user_data,
605 vaddr + shmem_page_offset,
609 return ret ? -EFAULT : 0;
612 static inline unsigned long
613 slow_user_access(struct io_mapping *mapping,
614 uint64_t page_base, int page_offset,
615 char __user *user_data,
616 unsigned long length, bool pwrite)
618 void __iomem *ioaddr;
622 ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
623 /* We can use the cpu mem copy function because this is X86. */
624 vaddr = (void __force *)ioaddr + page_offset;
626 unwritten = __copy_from_user(vaddr, user_data, length);
628 unwritten = __copy_to_user(user_data, vaddr, length);
630 io_mapping_unmap(ioaddr);
635 i915_gem_gtt_pread(struct drm_device *dev,
636 struct drm_i915_gem_object *obj, uint64_t size,
637 uint64_t data_offset, uint64_t data_ptr)
639 struct drm_i915_private *dev_priv = dev->dev_private;
640 struct i915_ggtt *ggtt = &dev_priv->ggtt;
641 struct drm_mm_node node;
642 char __user *user_data;
647 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
649 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
653 ret = i915_gem_object_get_pages(obj);
655 remove_mappable_node(&node);
659 i915_gem_object_pin_pages(obj);
661 node.start = i915_gem_obj_ggtt_offset(obj);
662 node.allocated = false;
663 ret = i915_gem_object_put_fence(obj);
668 ret = i915_gem_object_set_to_gtt_domain(obj, false);
672 user_data = u64_to_user_ptr(data_ptr);
674 offset = data_offset;
676 mutex_unlock(&dev->struct_mutex);
677 if (likely(!i915.prefault_disable)) {
678 ret = fault_in_multipages_writeable(user_data, remain);
680 mutex_lock(&dev->struct_mutex);
686 /* Operation in this page
688 * page_base = page offset within aperture
689 * page_offset = offset within page
690 * page_length = bytes to copy for this page
692 u32 page_base = node.start;
693 unsigned page_offset = offset_in_page(offset);
694 unsigned page_length = PAGE_SIZE - page_offset;
695 page_length = remain < page_length ? remain : page_length;
696 if (node.allocated) {
698 ggtt->base.insert_page(&ggtt->base,
699 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
704 page_base += offset & PAGE_MASK;
706 /* This is a slow read/write as it tries to read from
707 * and write to user memory which may result in page
708 * faults, and so we cannot perform this under struct_mutex.
710 if (slow_user_access(ggtt->mappable, page_base,
711 page_offset, user_data,
712 page_length, false)) {
717 remain -= page_length;
718 user_data += page_length;
719 offset += page_length;
722 mutex_lock(&dev->struct_mutex);
723 if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
724 /* The user has modified the object whilst we tried
725 * reading from it, and we now have no idea what domain
726 * the pages should be in. As we have just been touching
727 * them directly, flush everything back to the GTT
730 ret = i915_gem_object_set_to_gtt_domain(obj, false);
734 if (node.allocated) {
736 ggtt->base.clear_range(&ggtt->base,
737 node.start, node.size,
739 i915_gem_object_unpin_pages(obj);
740 remove_mappable_node(&node);
742 i915_gem_object_ggtt_unpin(obj);
749 i915_gem_shmem_pread(struct drm_device *dev,
750 struct drm_i915_gem_object *obj,
751 struct drm_i915_gem_pread *args,
752 struct drm_file *file)
754 char __user *user_data;
757 int shmem_page_offset, page_length, ret = 0;
758 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
760 int needs_clflush = 0;
761 struct sg_page_iter sg_iter;
763 if (!i915_gem_object_has_struct_page(obj))
766 user_data = u64_to_user_ptr(args->data_ptr);
769 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
771 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
775 offset = args->offset;
777 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
778 offset >> PAGE_SHIFT) {
779 struct page *page = sg_page_iter_page(&sg_iter);
784 /* Operation in this page
786 * shmem_page_offset = offset within page in shmem file
787 * page_length = bytes to copy for this page
789 shmem_page_offset = offset_in_page(offset);
790 page_length = remain;
791 if ((shmem_page_offset + page_length) > PAGE_SIZE)
792 page_length = PAGE_SIZE - shmem_page_offset;
794 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
795 (page_to_phys(page) & (1 << 17)) != 0;
797 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
798 user_data, page_do_bit17_swizzling,
803 mutex_unlock(&dev->struct_mutex);
805 if (likely(!i915.prefault_disable) && !prefaulted) {
806 ret = fault_in_multipages_writeable(user_data, remain);
807 /* Userspace is tricking us, but we've already clobbered
808 * its pages with the prefault and promised to write the
809 * data up to the first fault. Hence ignore any errors
810 * and just continue. */
815 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
816 user_data, page_do_bit17_swizzling,
819 mutex_lock(&dev->struct_mutex);
825 remain -= page_length;
826 user_data += page_length;
827 offset += page_length;
831 i915_gem_object_unpin_pages(obj);
837 * Reads data from the object referenced by handle.
838 * @dev: drm device pointer
839 * @data: ioctl data blob
840 * @file: drm file pointer
842 * On error, the contents of *data are undefined.
845 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
846 struct drm_file *file)
848 struct drm_i915_gem_pread *args = data;
849 struct drm_i915_gem_object *obj;
855 if (!access_ok(VERIFY_WRITE,
856 u64_to_user_ptr(args->data_ptr),
860 ret = i915_mutex_lock_interruptible(dev);
864 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
865 if (&obj->base == NULL) {
870 /* Bounds check source. */
871 if (args->offset > obj->base.size ||
872 args->size > obj->base.size - args->offset) {
877 trace_i915_gem_object_pread(obj, args->offset, args->size);
879 ret = i915_gem_shmem_pread(dev, obj, args, file);
881 /* pread for non shmem backed objects */
882 if (ret == -EFAULT || ret == -ENODEV)
883 ret = i915_gem_gtt_pread(dev, obj, args->size,
884 args->offset, args->data_ptr);
887 drm_gem_object_unreference(&obj->base);
889 mutex_unlock(&dev->struct_mutex);
893 /* This is the fast write path which cannot handle
894 * page faults in the source data
898 fast_user_write(struct io_mapping *mapping,
899 loff_t page_base, int page_offset,
900 char __user *user_data,
903 void __iomem *vaddr_atomic;
905 unsigned long unwritten;
907 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
908 /* We can use the cpu mem copy function because this is X86. */
909 vaddr = (void __force *)vaddr_atomic + page_offset;
910 unwritten = __copy_from_user_inatomic_nocache(vaddr,
912 io_mapping_unmap_atomic(vaddr_atomic);
917 * This is the fast pwrite path, where we copy the data directly from the
918 * user into the GTT, uncached.
919 * @dev: drm device pointer
920 * @obj: i915 gem object
921 * @args: pwrite arguments structure
922 * @file: drm file pointer
925 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
926 struct drm_i915_gem_object *obj,
927 struct drm_i915_gem_pwrite *args,
928 struct drm_file *file)
930 struct i915_ggtt *ggtt = &i915->ggtt;
931 struct drm_device *dev = obj->base.dev;
932 struct drm_mm_node node;
933 uint64_t remain, offset;
934 char __user *user_data;
936 bool hit_slow_path = false;
938 if (obj->tiling_mode != I915_TILING_NONE)
941 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
943 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
947 ret = i915_gem_object_get_pages(obj);
949 remove_mappable_node(&node);
953 i915_gem_object_pin_pages(obj);
955 node.start = i915_gem_obj_ggtt_offset(obj);
956 node.allocated = false;
957 ret = i915_gem_object_put_fence(obj);
962 ret = i915_gem_object_set_to_gtt_domain(obj, true);
966 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
969 user_data = u64_to_user_ptr(args->data_ptr);
970 offset = args->offset;
973 /* Operation in this page
975 * page_base = page offset within aperture
976 * page_offset = offset within page
977 * page_length = bytes to copy for this page
979 u32 page_base = node.start;
980 unsigned page_offset = offset_in_page(offset);
981 unsigned page_length = PAGE_SIZE - page_offset;
982 page_length = remain < page_length ? remain : page_length;
983 if (node.allocated) {
984 wmb(); /* flush the write before we modify the GGTT */
985 ggtt->base.insert_page(&ggtt->base,
986 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
987 node.start, I915_CACHE_NONE, 0);
988 wmb(); /* flush modifications to the GGTT (insert_page) */
990 page_base += offset & PAGE_MASK;
992 /* If we get a fault while copying data, then (presumably) our
993 * source page isn't available. Return the error and we'll
994 * retry in the slow path.
995 * If the object is non-shmem backed, we retry with the
996 * path that handles page faults.
998 if (fast_user_write(ggtt->mappable, page_base,
999 page_offset, user_data, page_length)) {
1000 hit_slow_path = true;
1001 mutex_unlock(&dev->struct_mutex);
1002 if (slow_user_access(ggtt->mappable,
1004 page_offset, user_data,
1005 page_length, true)) {
1007 mutex_lock(&dev->struct_mutex);
1011 mutex_lock(&dev->struct_mutex);
1014 remain -= page_length;
1015 user_data += page_length;
1016 offset += page_length;
1020 if (hit_slow_path) {
1022 (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1023 /* The user has modified the object whilst we tried
1024 * reading from it, and we now have no idea what domain
1025 * the pages should be in. As we have just been touching
1026 * them directly, flush everything back to the GTT
1029 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1033 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
1035 if (node.allocated) {
1037 ggtt->base.clear_range(&ggtt->base,
1038 node.start, node.size,
1040 i915_gem_object_unpin_pages(obj);
1041 remove_mappable_node(&node);
1043 i915_gem_object_ggtt_unpin(obj);
1049 /* Per-page copy function for the shmem pwrite fastpath.
1050 * Flushes invalid cachelines before writing to the target if
1051 * needs_clflush_before is set and flushes out any written cachelines after
1052 * writing if needs_clflush is set. */
1054 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1055 char __user *user_data,
1056 bool page_do_bit17_swizzling,
1057 bool needs_clflush_before,
1058 bool needs_clflush_after)
1063 if (unlikely(page_do_bit17_swizzling))
1066 vaddr = kmap_atomic(page);
1067 if (needs_clflush_before)
1068 drm_clflush_virt_range(vaddr + shmem_page_offset,
1070 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1071 user_data, page_length);
1072 if (needs_clflush_after)
1073 drm_clflush_virt_range(vaddr + shmem_page_offset,
1075 kunmap_atomic(vaddr);
1077 return ret ? -EFAULT : 0;
1080 /* Only difference to the fast-path function is that this can handle bit17
1081 * and uses non-atomic copy and kmap functions. */
1083 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1084 char __user *user_data,
1085 bool page_do_bit17_swizzling,
1086 bool needs_clflush_before,
1087 bool needs_clflush_after)
1093 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1094 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1096 page_do_bit17_swizzling);
1097 if (page_do_bit17_swizzling)
1098 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1102 ret = __copy_from_user(vaddr + shmem_page_offset,
1105 if (needs_clflush_after)
1106 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1108 page_do_bit17_swizzling);
1111 return ret ? -EFAULT : 0;
1115 i915_gem_shmem_pwrite(struct drm_device *dev,
1116 struct drm_i915_gem_object *obj,
1117 struct drm_i915_gem_pwrite *args,
1118 struct drm_file *file)
1122 char __user *user_data;
1123 int shmem_page_offset, page_length, ret = 0;
1124 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1125 int hit_slowpath = 0;
1126 int needs_clflush_after = 0;
1127 int needs_clflush_before = 0;
1128 struct sg_page_iter sg_iter;
1130 user_data = u64_to_user_ptr(args->data_ptr);
1131 remain = args->size;
1133 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1135 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1136 /* If we're not in the cpu write domain, set ourself into the gtt
1137 * write domain and manually flush cachelines (if required). This
1138 * optimizes for the case when the gpu will use the data
1139 * right away and we therefore have to clflush anyway. */
1140 needs_clflush_after = cpu_write_needs_clflush(obj);
1141 ret = i915_gem_object_wait_rendering(obj, false);
1145 /* Same trick applies to invalidate partially written cachelines read
1146 * before writing. */
1147 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1148 needs_clflush_before =
1149 !cpu_cache_is_coherent(dev, obj->cache_level);
1151 ret = i915_gem_object_get_pages(obj);
1155 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1157 i915_gem_object_pin_pages(obj);
1159 offset = args->offset;
1162 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1163 offset >> PAGE_SHIFT) {
1164 struct page *page = sg_page_iter_page(&sg_iter);
1165 int partial_cacheline_write;
1170 /* Operation in this page
1172 * shmem_page_offset = offset within page in shmem file
1173 * page_length = bytes to copy for this page
1175 shmem_page_offset = offset_in_page(offset);
1177 page_length = remain;
1178 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1179 page_length = PAGE_SIZE - shmem_page_offset;
1181 /* If we don't overwrite a cacheline completely we need to be
1182 * careful to have up-to-date data by first clflushing. Don't
1183 * overcomplicate things and flush the entire patch. */
1184 partial_cacheline_write = needs_clflush_before &&
1185 ((shmem_page_offset | page_length)
1186 & (boot_cpu_data.x86_clflush_size - 1));
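/* Example of the partial-write test above (assuming
 * needs_clflush_before is set): with a 64-byte clflush line, writing
 * 100 bytes at offset 10 leaves the first and last cachelines only
 * partly overwritten, so (10 | 100) & 63 != 0 and the pre-write flush
 * is needed; a 64-byte write at offset 128 passes the test and skips
 * the pre-flush.
 */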
1188 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1189 (page_to_phys(page) & (1 << 17)) != 0;
1191 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1192 user_data, page_do_bit17_swizzling,
1193 partial_cacheline_write,
1194 needs_clflush_after);
1199 mutex_unlock(&dev->struct_mutex);
1200 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1201 user_data, page_do_bit17_swizzling,
1202 partial_cacheline_write,
1203 needs_clflush_after);
1205 mutex_lock(&dev->struct_mutex);
1211 remain -= page_length;
1212 user_data += page_length;
1213 offset += page_length;
1217 i915_gem_object_unpin_pages(obj);
1221 * Fixup: Flush cpu caches in case we didn't flush the dirty
1222 * cachelines in-line while writing and the object moved
1223 * out of the cpu write domain while we've dropped the lock.
1225 if (!needs_clflush_after &&
1226 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1227 if (i915_gem_clflush_object(obj, obj->pin_display))
1228 needs_clflush_after = true;
1232 if (needs_clflush_after)
1233 i915_gem_chipset_flush(to_i915(dev));
1235 obj->cache_dirty = true;
1237 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1242 * Writes data to the object referenced by handle.
1244 * @data: ioctl data blob
1247 * On error, the contents of the buffer that were to be modified are undefined.
1250 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1251 struct drm_file *file)
1253 struct drm_i915_private *dev_priv = dev->dev_private;
1254 struct drm_i915_gem_pwrite *args = data;
1255 struct drm_i915_gem_object *obj;
1258 if (args->size == 0)
1261 if (!access_ok(VERIFY_READ,
1262 u64_to_user_ptr(args->data_ptr),
1266 if (likely(!i915.prefault_disable)) {
1267 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1273 intel_runtime_pm_get(dev_priv);
1275 ret = i915_mutex_lock_interruptible(dev);
1279 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1280 if (&obj->base == NULL) {
1285 /* Bounds check destination. */
1286 if (args->offset > obj->base.size ||
1287 args->size > obj->base.size - args->offset) {
1292 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1295 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1296 * it would end up going through the fenced access, and we'll get
1297 * different detiling behavior between reading and writing.
1298 * pread/pwrite currently are reading and writing from the CPU
1299 * perspective, requiring manual detiling by the client.
1301 if (!i915_gem_object_has_struct_page(obj) ||
1302 cpu_write_needs_clflush(obj)) {
1303 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1304 /* Note that the gtt paths might fail with non-page-backed user
1305 * pointers (e.g. gtt mappings when moving data between
1306 * textures). Fallback to the shmem path in that case. */
1309 if (ret == -EFAULT) {
1310 if (obj->phys_handle)
1311 ret = i915_gem_phys_pwrite(obj, args, file);
1312 else if (i915_gem_object_has_struct_page(obj))
1313 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1319 drm_gem_object_unreference(&obj->base);
1321 mutex_unlock(&dev->struct_mutex);
1323 intel_runtime_pm_put(dev_priv);
1329 i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
1331 if (__i915_terminally_wedged(reset_counter))
1334 if (__i915_reset_in_progress(reset_counter)) {
1335 /* Non-interruptible callers can't handle -EAGAIN, hence return
1336 * -EIO unconditionally for these. */
1346 static void fake_irq(unsigned long data)
1348 wake_up_process((struct task_struct *)data);
1351 static bool missed_irq(struct drm_i915_private *dev_priv,
1352 struct intel_engine_cs *engine)
1354 return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
1357 static unsigned long local_clock_us(unsigned *cpu)
1361 /* Cheaply and approximately convert from nanoseconds to microseconds.
1362 * The result and subsequent calculations are also defined in the same
1363 * approximate microsecond units. The principal source of timing
1364 * error here is from the simple truncation.
1366 * Note that local_clock() is only defined wrt to the current CPU;
1367 * the comparisons are no longer valid if we switch CPUs. Instead of
1368 * blocking preemption for the entire busywait, we can detect the CPU
1369 * switch and use that as indicator of system load and a reason to
1370 * stop busywaiting, see busywait_stop().
1373 t = local_clock() >> 10;
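/* Shifting by 10 divides the nanosecond clock by 1024 rather than
 * 1000, so each "microsecond" here is really 1.024us (~2% error),
 * which is more than accurate enough for the ~5us spin budget used in
 * __i915_spin_request() below.
 */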
1379 static bool busywait_stop(unsigned long timeout, unsigned cpu)
1383 if (time_after(local_clock_us(&this_cpu), timeout))
1386 return this_cpu != cpu;
1389 static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
1391 unsigned long timeout;
1394 /* When waiting for high frequency requests, e.g. during synchronous
1395 * rendering split between the CPU and GPU, the finite amount of time
1396 * required to set up the irq and wait upon it limits the response
1397 * rate. By busywaiting on the request completion for a short while we
1398 * can service the high frequency waits as quick as possible. However,
1399 * if it is a slow request, we want to sleep as quickly as possible.
1400 * The tradeoff between waiting and sleeping is roughly the time it
1401 * takes to sleep on a request, on the order of a microsecond.
1404 if (req->engine->irq_refcount)
1407 /* Only spin if we know the GPU is processing this request */
1408 if (!i915_gem_request_started(req, true))
1411 timeout = local_clock_us(&cpu) + 5;
1412 while (!need_resched()) {
1413 if (i915_gem_request_completed(req, true))
1416 if (signal_pending_state(state, current))
1419 if (busywait_stop(timeout, cpu))
1422 cpu_relax_lowlatency();
1425 if (i915_gem_request_completed(req, false))
1432 * __i915_wait_request - wait until execution of request has finished
1434 * @interruptible: do an interruptible wait (normally yes)
1435 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1438 * Note: It is of utmost importance that the passed in seqno and reset_counter
1439 * values have been read by the caller in an smp safe manner. Where read-side
1440 * locks are involved, it is sufficient to read the reset_counter before
1441 * unlocking the lock that protects the seqno. For lockless tricks, the
1442 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1445 * Returns 0 if the request was completed within the allotted time. Else returns the
1446 * errno with remaining time filled in timeout argument.
1448 int __i915_wait_request(struct drm_i915_gem_request *req,
1451 struct intel_rps_client *rps)
1453 struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
1454 struct drm_i915_private *dev_priv = req->i915;
1455 const bool irq_test_in_progress =
1456 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
1457 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1459 unsigned long timeout_expire;
1460 s64 before = 0; /* Only to silence a compiler warning. */
1463 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1465 if (list_empty(&req->list))
1468 if (i915_gem_request_completed(req, true))
1473 if (WARN_ON(*timeout < 0))
1479 timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
1482 * Record current time in case interrupted by signal, or wedged.
1484 before = ktime_get_raw_ns();
1487 if (INTEL_INFO(dev_priv)->gen >= 6)
1488 gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
1490 trace_i915_gem_request_wait_begin(req);
1492 /* Optimistic spin for the next jiffie before touching IRQs */
1493 ret = __i915_spin_request(req, state);
1497 if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
1503 struct timer_list timer;
1505 prepare_to_wait(&engine->irq_queue, &wait, state);
1507 /* We need to check whether any gpu reset happened in between
1508 * the request being submitted and now. If a reset has occurred,
1509 * the request is effectively complete (we either are in the
1510 * process of or have discarded the rendering and completely
1511 * reset the GPU. The results of the request are lost and we
1512 * are free to continue on with the original operation.
1514 if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
1519 if (i915_gem_request_completed(req, false)) {
1524 if (signal_pending_state(state, current)) {
1529 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1534 timer.function = NULL;
1535 if (timeout || missed_irq(dev_priv, engine)) {
1536 unsigned long expire;
1538 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1539 expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
1540 mod_timer(&timer, expire);
1545 if (timer.function) {
1546 del_singleshot_timer_sync(&timer);
1547 destroy_timer_on_stack(&timer);
1550 if (!irq_test_in_progress)
1551 engine->irq_put(engine);
1553 finish_wait(&engine->irq_queue, &wait);
1556 trace_i915_gem_request_wait_end(req);
1559 s64 tres = *timeout - (ktime_get_raw_ns() - before);
1561 *timeout = tres < 0 ? 0 : tres;
1564 * Apparently ktime isn't accurate enough and occasionally has a
1565 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1566 * things up to make the test happy. We allow up to 1 jiffy.
1568 * This is a regression from the timespec->ktime conversion.
1570 if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1577 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
1578 struct drm_file *file)
1580 struct drm_i915_file_private *file_priv;
1582 WARN_ON(!req || !file || req->file_priv);
1590 file_priv = file->driver_priv;
1592 spin_lock(&file_priv->mm.lock);
1593 req->file_priv = file_priv;
1594 list_add_tail(&req->client_list, &file_priv->mm.request_list);
1595 spin_unlock(&file_priv->mm.lock);
1597 req->pid = get_pid(task_pid(current));
1603 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1605 struct drm_i915_file_private *file_priv = request->file_priv;
1610 spin_lock(&file_priv->mm.lock);
1611 list_del(&request->client_list);
1612 request->file_priv = NULL;
1613 spin_unlock(&file_priv->mm.lock);
1615 put_pid(request->pid);
1616 request->pid = NULL;
1619 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1621 trace_i915_gem_request_retire(request);
1623 /* We know the GPU must have read the request to have
1624 * sent us the seqno + interrupt, so use the position
1625 * of tail of the request to update the last known position
1628 * Note this requires that we are always called in request
1631 request->ringbuf->last_retired_head = request->postfix;
1633 list_del_init(&request->list);
1634 i915_gem_request_remove_from_client(request);
1636 if (request->previous_context) {
1637 if (i915.enable_execlists)
1638 intel_lr_context_unpin(request->previous_context,
1642 i915_gem_context_unreference(request->ctx);
1643 i915_gem_request_unreference(request);
1647 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1649 struct intel_engine_cs *engine = req->engine;
1650 struct drm_i915_gem_request *tmp;
1652 lockdep_assert_held(&engine->i915->dev->struct_mutex);
1654 if (list_empty(&req->list))
1658 tmp = list_first_entry(&engine->request_list,
1659 typeof(*tmp), list);
1661 i915_gem_request_retire(tmp);
1662 } while (tmp != req);
1664 WARN_ON(i915_verify_lists(engine->dev));
1668 * Waits for a request to be signaled, and cleans up the
1669 * request and object lists appropriately for that event.
1670 * @req: request to wait on
1673 i915_wait_request(struct drm_i915_gem_request *req)
1675 struct drm_i915_private *dev_priv = req->i915;
1679 interruptible = dev_priv->mm.interruptible;
1681 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
1683 ret = __i915_wait_request(req, interruptible, NULL, NULL);
1687 /* If the GPU hung, we want to keep the requests to find the guilty. */
1688 if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
1689 __i915_gem_request_retire__upto(req);
1695 * Ensures that all rendering to the object has completed and the object is
1696 * safe to unbind from the GTT or access from the CPU.
1697 * @obj: i915 gem object
1698 * @readonly: waiting for read access or write
1701 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1710 if (obj->last_write_req != NULL) {
1711 ret = i915_wait_request(obj->last_write_req);
1715 i = obj->last_write_req->engine->id;
1716 if (obj->last_read_req[i] == obj->last_write_req)
1717 i915_gem_object_retire__read(obj, i);
1719 i915_gem_object_retire__write(obj);
1722 for (i = 0; i < I915_NUM_ENGINES; i++) {
1723 if (obj->last_read_req[i] == NULL)
1726 ret = i915_wait_request(obj->last_read_req[i]);
1730 i915_gem_object_retire__read(obj, i);
1732 GEM_BUG_ON(obj->active);
1739 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1740 struct drm_i915_gem_request *req)
1742 int ring = req->engine->id;
1744 if (obj->last_read_req[ring] == req)
1745 i915_gem_object_retire__read(obj, ring);
1746 else if (obj->last_write_req == req)
1747 i915_gem_object_retire__write(obj);
1749 if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
1750 __i915_gem_request_retire__upto(req);
1753 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1754 * as the object state may change during this call.
1756 static __must_check int
1757 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1758 struct intel_rps_client *rps,
1761 struct drm_device *dev = obj->base.dev;
1762 struct drm_i915_private *dev_priv = dev->dev_private;
1763 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1766 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1767 BUG_ON(!dev_priv->mm.interruptible);
1773 struct drm_i915_gem_request *req;
1775 req = obj->last_write_req;
1779 requests[n++] = i915_gem_request_reference(req);
1781 for (i = 0; i < I915_NUM_ENGINES; i++) {
1782 struct drm_i915_gem_request *req;
1784 req = obj->last_read_req[i];
1788 requests[n++] = i915_gem_request_reference(req);
1792 mutex_unlock(&dev->struct_mutex);
1794 for (i = 0; ret == 0 && i < n; i++)
1795 ret = __i915_wait_request(requests[i], true, NULL, rps);
1796 mutex_lock(&dev->struct_mutex);
1798 for (i = 0; i < n; i++) {
1800 i915_gem_object_retire_request(obj, requests[i]);
1801 i915_gem_request_unreference(requests[i]);
1807 static struct intel_rps_client *to_rps_client(struct drm_file *file)
1809 struct drm_i915_file_private *fpriv = file->driver_priv;
1813 static enum fb_op_origin
1814 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1816 return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1817 ORIGIN_GTT : ORIGIN_CPU;
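/* The origin picked above feeds the frontbuffer tracking calls
 * (intel_fb_obj_invalidate/flush) used elsewhere in this file: a
 * GTT-domain write is reported as ORIGIN_GTT unless the object also
 * has a WC CPU mmap, in which case the write is treated as
 * CPU-originated.
 */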
1821 * Called when user space prepares to use an object with the CPU, either
1822 * through the mmap ioctl's mapping or a GTT mapping.
1824 * @data: ioctl data blob
1828 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1829 struct drm_file *file)
1831 struct drm_i915_gem_set_domain *args = data;
1832 struct drm_i915_gem_object *obj;
1833 uint32_t read_domains = args->read_domains;
1834 uint32_t write_domain = args->write_domain;
1837 /* Only handle setting domains to types used by the CPU. */
1838 if (write_domain & I915_GEM_GPU_DOMAINS)
1841 if (read_domains & I915_GEM_GPU_DOMAINS)
1844 /* Having something in the write domain implies it's in the read
1845 * domain, and only that read domain. Enforce that in the request.
1847 if (write_domain != 0 && read_domains != write_domain)
1850 ret = i915_mutex_lock_interruptible(dev);
1854 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1855 if (&obj->base == NULL) {
1860 /* Try to flush the object off the GPU without holding the lock.
1861 * We will repeat the flush holding the lock in the normal manner
1862 * to catch cases where we are gazumped.
1864 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1865 to_rps_client(file),
1870 if (read_domains & I915_GEM_DOMAIN_GTT)
1871 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1873 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1875 if (write_domain != 0)
1876 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1879 drm_gem_object_unreference(&obj->base);
1881 mutex_unlock(&dev->struct_mutex);
1886 * Called when user space has done writes to this buffer
1888 * @data: ioctl data blob
1892 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1893 struct drm_file *file)
1895 struct drm_i915_gem_sw_finish *args = data;
1896 struct drm_i915_gem_object *obj;
1899 ret = i915_mutex_lock_interruptible(dev);
1903 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1904 if (&obj->base == NULL) {
1909 /* Pinned buffers may be scanout, so flush the cache */
1910 if (obj->pin_display)
1911 i915_gem_object_flush_cpu_write_domain(obj);
1913 drm_gem_object_unreference(&obj->base);
1915 mutex_unlock(&dev->struct_mutex);
1920 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1923 * @data: ioctl data blob
1926 * While the mapping holds a reference on the contents of the object, it doesn't
1927 * imply a ref on the object itself.
1931 * DRM driver writers who look at this function as an example for how to do GEM
1932 * mmap support, please don't implement mmap support like here. The modern way
1933 * to implement DRM mmap support is with an mmap offset ioctl (like
1934 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1935 * That way debug tooling like valgrind will understand what's going on, hiding
1936 * the mmap call in a driver private ioctl will break that. The i915 driver only
1937 * does cpu mmaps this way because we didn't know better.
1940 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1941 struct drm_file *file)
1943 struct drm_i915_gem_mmap *args = data;
1944 struct drm_gem_object *obj;
1947 if (args->flags & ~(I915_MMAP_WC))
1950 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1953 obj = drm_gem_object_lookup(file, args->handle);
1957 /* prime objects have no backing filp to GEM mmap
1961 drm_gem_object_unreference_unlocked(obj);
1965 addr = vm_mmap(obj->filp, 0, args->size,
1966 PROT_READ | PROT_WRITE, MAP_SHARED,
1968 if (args->flags & I915_MMAP_WC) {
1969 struct mm_struct *mm = current->mm;
1970 struct vm_area_struct *vma;
1972 if (down_write_killable(&mm->mmap_sem)) {
1973 drm_gem_object_unreference_unlocked(obj);
1976 vma = find_vma(mm, addr);
1979 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1982 up_write(&mm->mmap_sem);
1984 /* This may race, but that's ok, it only gets set */
1985 WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
1987 drm_gem_object_unreference_unlocked(obj);
1988 if (IS_ERR((void *)addr))
1991 args->addr_ptr = (uint64_t) addr;
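/* For illustration only, roughly how userspace drives this legacy CPU
 * mmap path (field names per struct drm_i915_gem_mmap):
 *
 *	struct drm_i915_gem_mmap arg = { .handle = handle, .size = size };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * Setting I915_MMAP_WC in arg.flags requests the write-combining
 * mapping handled above.
 */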
1997 * i915_gem_fault - fault a page into the GTT
1998 * @vma: VMA in question
2001 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
2002 * from userspace. The fault handler takes care of binding the object to
2003 * the GTT (if needed), allocating and programming a fence register (again,
2004 * only if needed based on whether the old reg is still valid or the object
2005 * is tiled) and inserting a new PTE into the faulting process.
2007 * Note that the faulting process may involve evicting existing objects
2008 * from the GTT and/or fence registers to make room. So performance may
2009 * suffer if the GTT working set is large or there are few fence registers
2012 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2014 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
2015 struct drm_device *dev = obj->base.dev;
2016 struct drm_i915_private *dev_priv = to_i915(dev);
2017 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2018 struct i915_ggtt_view view = i915_ggtt_view_normal;
2019 pgoff_t page_offset;
2022 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
2024 intel_runtime_pm_get(dev_priv);
2026 /* We don't use vmf->pgoff since that has the fake offset */
2027 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
2030 ret = i915_mutex_lock_interruptible(dev);
2034 trace_i915_gem_object_fault(obj, page_offset, true, write);
2036 /* Try to flush the object off the GPU first without holding the lock.
2037 * Upon reacquiring the lock, we will perform our sanity checks and then
2038 * repeat the flush holding the lock in the normal manner to catch cases
2039 * where we are gazumped.
2041 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
2045 /* Access to snoopable pages through the GTT is incoherent. */
2046 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
2051 /* Use a partial view if the object is bigger than the aperture. */
2052 if (obj->base.size >= ggtt->mappable_end &&
2053 obj->tiling_mode == I915_TILING_NONE) {
2054 static const unsigned int chunk_size = 256; /* 1 MiB */
2056 memset(&view, 0, sizeof(view));
2057 view.type = I915_GGTT_VIEW_PARTIAL;
2058 view.params.partial.offset = rounddown(page_offset, chunk_size);
2059 view.params.partial.size =
2062 (vma->vm_end - vma->vm_start)/PAGE_SIZE -
2063 view.params.partial.offset);
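/* With chunk_size = 256 pages the partial view is a 1 MiB aligned
 * window around the fault: e.g. a fault at page 300 of a large object
 * maps pages 256..511, clamped so the view never extends past the
 * user's vma.
 */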
2066 /* Now pin it into the GTT if needed */
2067 ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
2071 ret = i915_gem_object_set_to_gtt_domain(obj, write);
2075 ret = i915_gem_object_get_fence(obj);
2079 /* Finally, remap it using the new GTT offset */
2080 pfn = ggtt->mappable_base +
2081 i915_gem_obj_ggtt_offset_view(obj, &view);
2084 if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
2085 /* Overriding existing pages in partial view does not cause
2086 * us any trouble as TLBs are still valid because the fault
2087 * is due to userspace losing part of the mapping or never
2088 * having accessed it before (at this partial view's range).
2090 unsigned long base = vma->vm_start +
2091 (view.params.partial.offset << PAGE_SHIFT);
2094 for (i = 0; i < view.params.partial.size; i++) {
2095 ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
2100 obj->fault_mappable = true;
2102 if (!obj->fault_mappable) {
2103 unsigned long size = min_t(unsigned long,
2104 vma->vm_end - vma->vm_start,
2108 for (i = 0; i < size >> PAGE_SHIFT; i++) {
2109 ret = vm_insert_pfn(vma,
2110 (unsigned long)vma->vm_start + i * PAGE_SIZE,
2116 obj->fault_mappable = true;
2118 ret = vm_insert_pfn(vma,
2119 (unsigned long)vmf->virtual_address,
2123 i915_gem_object_ggtt_unpin_view(obj, &view);
2125 mutex_unlock(&dev->struct_mutex);
2130 * We eat errors when the gpu is terminally wedged to avoid
2131 * userspace unduly crashing (gl has no provisions for mmaps to
2132 * fail). But any other -EIO isn't ours (e.g. swap in failure)
2133 * and so needs to be reported.
2135 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2136 ret = VM_FAULT_SIGBUS;
2141 * EAGAIN means the gpu is hung and we'll wait for the error
2142 * handler to reset everything when re-faulting in
2143 * i915_mutex_lock_interruptible.
2150 * EBUSY is ok: this just means that another thread
2151 * already did the job.
2153 ret = VM_FAULT_NOPAGE;
2160 ret = VM_FAULT_SIGBUS;
2163 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2164 ret = VM_FAULT_SIGBUS;
2168 intel_runtime_pm_put(dev_priv);
2173 * i915_gem_release_mmap - remove physical page mappings
2174 * @obj: obj in question
2176 * Preserve the reservation of the mmapping with the DRM core code, but
2177 * relinquish ownership of the pages back to the system.
2179 * It is vital that we remove the page mapping if we have mapped a tiled
2180 * object through the GTT and then lose the fence register due to
2181 * resource pressure. Similarly if the object has been moved out of the
2182 * aperture, then pages mapped into userspace must be revoked. Removing the
2183 * mapping will then trigger a page fault on the next user access, allowing
2184 * fixup by i915_gem_fault().
2187 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2189 /* Serialisation between user GTT access and our code depends upon
2190 * revoking the CPU's PTE whilst the mutex is held. The next user
2191 * pagefault then has to wait until we release the mutex.
2193 lockdep_assert_held(&obj->base.dev->struct_mutex);
2195 if (!obj->fault_mappable)
2198 drm_vma_node_unmap(&obj->base.vma_node,
2199 obj->base.dev->anon_inode->i_mapping);
2201 /* Ensure that the CPU's PTE are revoked and there are not outstanding
2202 * memory transactions from userspace before we return. The TLB
2203 * flushing implied by changing the PTE above *should* be
2204 * sufficient; an extra barrier here just provides us with a bit
2205 * of paranoid documentation about our requirement to serialise
2206 * memory writes before touching registers / GSM.
2210 obj->fault_mappable = false;
2214 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
2216 struct drm_i915_gem_object *obj;
2218 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
2219 i915_gem_release_mmap(obj);
2223 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
2227 if (INTEL_INFO(dev)->gen >= 4 ||
2228 tiling_mode == I915_TILING_NONE)
2231 /* Previous chips need a power-of-two fence region when tiling */
2233 gtt_size = 1024*1024;
2235 gtt_size = 512*1024;
2237 while (gtt_size < size)
2244 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
2246 * @size: object size
2247 * @tiling_mode: tiling mode
2248 * @fenced: is fenced alignment required or not
2250 * Return the required GTT alignment for an object, taking into account
2251 * potential fence register mapping.
2254 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2255 int tiling_mode, bool fenced)
2258 * Minimum alignment is 4k (GTT page size), but might be greater
2259 * if a fence register is needed for the object.
2261 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
2262 tiling_mode == I915_TILING_NONE)
2266 * Previous chips need to be aligned to the size of the smallest
2267 * fence register that can contain the object.
2269 return i915_gem_get_gtt_size(dev, size, tiling_mode);
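/* Example: on a pre-i965 part a 700 KiB tiled object needs a
 * power-of-two fence region, so both its GTT size and alignment round
 * up to 1 MiB; on gen4+ (or for untiled objects) the alignment is just
 * the 4 KiB GTT page size.
 */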
2272 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2274 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2277 dev_priv->mm.shrinker_no_lock_stealing = true;
2279 ret = drm_gem_create_mmap_offset(&obj->base);
2283 /* Badly fragmented mmap space? The only way we can recover
2284 * space is by destroying unwanted objects. We can't randomly release
2285 * mmap_offsets as userspace expects them to be persistent for the
2286 * lifetime of the objects. The closest we can do is to release the
2287 * offsets on purgeable objects by truncating it and marking it purged,
2288 * which prevents userspace from ever using that object again.
2290 i915_gem_shrink(dev_priv,
2291 obj->base.size >> PAGE_SHIFT,
2293 I915_SHRINK_UNBOUND |
2294 I915_SHRINK_PURGEABLE);
2295 ret = drm_gem_create_mmap_offset(&obj->base);
2299 i915_gem_shrink_all(dev_priv);
2300 ret = drm_gem_create_mmap_offset(&obj->base);
2302 dev_priv->mm.shrinker_no_lock_stealing = false;
2307 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2309 drm_gem_free_mmap_offset(&obj->base);
2313 i915_gem_mmap_gtt(struct drm_file *file,
2314 struct drm_device *dev,
2318 struct drm_i915_gem_object *obj;
2321 ret = i915_mutex_lock_interruptible(dev);
2325 obj = to_intel_bo(drm_gem_object_lookup(file, handle));
2326 if (&obj->base == NULL) {
2331 if (obj->madv != I915_MADV_WILLNEED) {
2332 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
2337 ret = i915_gem_object_create_mmap_offset(obj);
2341 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2344 drm_gem_object_unreference(&obj->base);
2346 mutex_unlock(&dev->struct_mutex);
2351 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2353 * @data: GTT mapping ioctl data
2354 * @file: drm file pointer
2356 * Simply returns the fake offset to userspace so it can mmap it.
2357 * The mmap call will end up in drm_gem_mmap(), which will set things
2358 * up so we can get faults in the handler above.
2360 * The fault handler will take care of binding the object into the GTT
2361 * (since it may have been evicted to make room for something), allocating
2362 * a fence register, and mapping the appropriate aperture address into
2366 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2367 struct drm_file *file)
2369 struct drm_i915_gem_mmap_gtt *args = data;
2371 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
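/* Roughly how userspace consumes this, for illustration: the fake
 * offset returned in args->offset is passed straight to mmap() on the
 * DRM fd and i915_gem_fault() above fills in the PTEs on first access:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */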
2374 /* Immediately discard the backing storage */
2376 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2378 i915_gem_object_free_mmap_offset(obj);
2380 if (obj->base.filp == NULL)
2383 /* Our goal here is to return as much of the memory as
2384 * is possible back to the system as we are called from OOM.
2385 * To do this we must instruct the shmfs to drop all of its
2386 * backing pages, *now*.
2388 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2389 obj->madv = __I915_MADV_PURGED;
2392 /* Try to discard unwanted pages */
2394 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2396 struct address_space *mapping;
2398 switch (obj->madv) {
2399 case I915_MADV_DONTNEED:
2400 i915_gem_object_truncate(obj);
2401 case __I915_MADV_PURGED:
2405 if (obj->base.filp == NULL)
2408 mapping = file_inode(obj->base.filp)->i_mapping;
2409 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2413 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2415 struct sgt_iter sgt_iter;
2419 BUG_ON(obj->madv == __I915_MADV_PURGED);
2421 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2423 /* In the event of a disaster, abandon all caches and
2424 * hope for the best.
2426 i915_gem_clflush_object(obj, true);
2427 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2430 i915_gem_gtt_finish_object(obj);
2432 if (i915_gem_object_needs_bit17_swizzle(obj))
2433 i915_gem_object_save_bit_17_swizzle(obj);
2435 if (obj->madv == I915_MADV_DONTNEED)
2438 for_each_sgt_page(page, sgt_iter, obj->pages) {
2440 set_page_dirty(page);
2442 if (obj->madv == I915_MADV_WILLNEED)
2443 mark_page_accessed(page);
2449 sg_free_table(obj->pages);
2454 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2456 const struct drm_i915_gem_object_ops *ops = obj->ops;
2458 if (obj->pages == NULL)
2461 if (obj->pages_pin_count)
2464 BUG_ON(i915_gem_obj_bound_any(obj));
2466 /* ->put_pages might need to allocate memory for the bit17 swizzle
2467 * array, hence protect them from being reaped by removing them from gtt
2469 list_del(&obj->global_list);
2472 if (is_vmalloc_addr(obj->mapping))
2473 vunmap(obj->mapping);
2475 kunmap(kmap_to_page(obj->mapping));
2476 obj->mapping = NULL;
2479 ops->put_pages(obj);
2482 i915_gem_object_invalidate(obj);
2488 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2490 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2492 struct address_space *mapping;
2493 struct sg_table *st;
2494 struct scatterlist *sg;
2495 struct sgt_iter sgt_iter;
2497 unsigned long last_pfn = 0; /* suppress gcc warning */
2501 /* Assert that the object is not currently in any GPU domain. As it
2502 * wasn't in the GTT, there shouldn't be any way it could have been in
2505 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2506 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2508 st = kmalloc(sizeof(*st), GFP_KERNEL);
2512 page_count = obj->base.size / PAGE_SIZE;
2513 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2518 /* Get the list of pages out of our struct file. They'll be pinned
2519 * at this point until we release them.
2521 * Fail silently without starting the shrinker
2523 mapping = file_inode(obj->base.filp)->i_mapping;
2524 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2525 gfp |= __GFP_NORETRY | __GFP_NOWARN;
2528 for (i = 0; i < page_count; i++) {
2529 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2531 i915_gem_shrink(dev_priv,
2534 I915_SHRINK_UNBOUND |
2535 I915_SHRINK_PURGEABLE);
2536 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2539 /* We've tried hard to allocate the memory by reaping
2540 * our own buffer, now let the real VM do its job and
2541 * go down in flames if truly OOM.
2543 i915_gem_shrink_all(dev_priv);
2544 page = shmem_read_mapping_page(mapping, i);
2546 ret = PTR_ERR(page);
2550 #ifdef CONFIG_SWIOTLB
2551 if (swiotlb_nr_tbl()) {
2553 sg_set_page(sg, page, PAGE_SIZE, 0);
2558 if (!i || page_to_pfn(page) != last_pfn + 1) {
2562 sg_set_page(sg, page, PAGE_SIZE, 0);
2564 sg->length += PAGE_SIZE;
2566 last_pfn = page_to_pfn(page);
2568 /* Check that the i965g/gm workaround works. */
2569 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2571 #ifdef CONFIG_SWIOTLB
2572 if (!swiotlb_nr_tbl())
2577 ret = i915_gem_gtt_prepare_object(obj);
2581 if (i915_gem_object_needs_bit17_swizzle(obj))
2582 i915_gem_object_do_bit_17_swizzle(obj);
2584 if (obj->tiling_mode != I915_TILING_NONE &&
2585 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2586 i915_gem_object_pin_pages(obj);
2592 for_each_sgt_page(page, sgt_iter, st)
2597 /* shmemfs first checks if there is enough memory to allocate the page
2598 * and reports ENOSPC should there be insufficient, along with the usual
2599 * ENOMEM for a genuine allocation failure.
2601 * We use ENOSPC in our driver to mean that we have run out of aperture
2602 * space and so want to translate the error from shmemfs back to our
2603 * usual understanding of ENOMEM.
2611 /* Ensure that the associated pages are gathered from the backing storage
2612 * and pinned into our object. i915_gem_object_get_pages() may be called
2613 * multiple times before they are released by a single call to
2614 * i915_gem_object_put_pages() - once the pages are no longer referenced
2615 * either as a result of memory pressure (reaping pages under the shrinker)
2616 * or as the object is itself released.
2619 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2621 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2622 const struct drm_i915_gem_object_ops *ops = obj->ops;
2628 if (obj->madv != I915_MADV_WILLNEED) {
2629 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2633 BUG_ON(obj->pages_pin_count);
2635 ret = ops->get_pages(obj);
2639 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2641 obj->get_page.sg = obj->pages->sgl;
2642 obj->get_page.last = 0;
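/*
 * A minimal sketch of the expected calling pattern, assuming the caller
 * holds struct_mutex: gather the pages, pin them for the duration of the
 * access, and drop the pin afterwards so the shrinker may reap them again.
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	// ... access obj->pages (the sg_table) ...
 *	i915_gem_object_unpin_pages(obj);
 */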
2647 /* The 'mapping' part of i915_gem_object_pin_map() below */
2648 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2650 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2651 struct sg_table *sgt = obj->pages;
2652 struct sgt_iter sgt_iter;
2654 struct page *stack_pages[32];
2655 struct page **pages = stack_pages;
2656 unsigned long i = 0;
2659 /* A single page can always be kmapped */
2661 return kmap(sg_page(sgt->sgl));
2663 if (n_pages > ARRAY_SIZE(stack_pages)) {
2664 /* Too big for stack -- allocate temporary array instead */
2665 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2670 for_each_sgt_page(page, sgt_iter, sgt)
2673 /* Check that we have the expected number of pages */
2674 GEM_BUG_ON(i != n_pages);
2676 addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2678 if (pages != stack_pages)
2679 drm_free_large(pages);
2684 /* get, pin, and map the pages of the object into kernel space */
2685 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2689 lockdep_assert_held(&obj->base.dev->struct_mutex);
2691 ret = i915_gem_object_get_pages(obj);
2693 return ERR_PTR(ret);
2695 i915_gem_object_pin_pages(obj);
2697 if (!obj->mapping) {
2698 obj->mapping = i915_gem_object_map(obj);
2699 if (!obj->mapping) {
2700 i915_gem_object_unpin_pages(obj);
2701 return ERR_PTR(-ENOMEM);
2705 return obj->mapping;
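/*
 * A minimal usage sketch for the helper above, assuming struct_mutex is
 * held and that i915_gem_object_unpin_map() (a thin wrapper that drops the
 * page pin while keeping the cached mapping) is available in this tree;
 * src and len are illustrative names only.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, len);
 *	i915_gem_object_unpin_map(obj);
 */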
2708 void i915_vma_move_to_active(struct i915_vma *vma,
2709 struct drm_i915_gem_request *req)
2711 struct drm_i915_gem_object *obj = vma->obj;
2712 struct intel_engine_cs *engine;
2714 engine = i915_gem_request_get_engine(req);
2716 /* Add a reference if we're newly entering the active list. */
2717 if (obj->active == 0)
2718 drm_gem_object_reference(&obj->base);
2719 obj->active |= intel_engine_flag(engine);
2721 list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
2722 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
2724 list_move_tail(&vma->vm_link, &vma->vm->active_list);
2728 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2730 GEM_BUG_ON(obj->last_write_req == NULL);
2731 GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
2733 i915_gem_request_assign(&obj->last_write_req, NULL);
2734 intel_fb_obj_flush(obj, true, ORIGIN_CS);
2738 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2740 struct i915_vma *vma;
2742 GEM_BUG_ON(obj->last_read_req[ring] == NULL);
2743 GEM_BUG_ON(!(obj->active & (1 << ring)));
2745 list_del_init(&obj->engine_list[ring]);
2746 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2748 if (obj->last_write_req && obj->last_write_req->engine->id == ring)
2749 i915_gem_object_retire__write(obj);
2751 obj->active &= ~(1 << ring);
2755 /* Bump our place on the bound list to keep it roughly in LRU order
2756 * so that we don't steal from recently used but inactive objects
2757 * (unless we are forced to, of course!)
2759 list_move_tail(&obj->global_list,
2760 &to_i915(obj->base.dev)->mm.bound_list);
2762 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2763 if (!list_empty(&vma->vm_link))
2764 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
2767 i915_gem_request_assign(&obj->last_fenced_req, NULL);
2768 drm_gem_object_unreference(&obj->base);
2772 i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
2774 struct intel_engine_cs *engine;
2777 /* Carefully retire all requests without writing to the rings */
2778 for_each_engine(engine, dev_priv) {
2779 ret = intel_engine_idle(engine);
2783 i915_gem_retire_requests(dev_priv);
2785 /* Finally reset hw state */
2786 for_each_engine(engine, dev_priv)
2787 intel_ring_init_seqno(engine, seqno);
2792 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2794 struct drm_i915_private *dev_priv = dev->dev_private;
2800 /* HWS page needs to be set to a value lower than what we
2801 * will inject into the ring
2803 ret = i915_gem_init_seqno(dev_priv, seqno - 1);
2807 /* Carefully set the last_seqno value so that wrap
2808 * detection still works
2810 dev_priv->next_seqno = seqno;
2811 dev_priv->last_seqno = seqno - 1;
2812 if (dev_priv->last_seqno == 0)
2813 dev_priv->last_seqno--;
2819 i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
2821 /* reserve 0 for non-seqno */
2822 if (dev_priv->next_seqno == 0) {
2823 int ret = i915_gem_init_seqno(dev_priv, 0);
2827 dev_priv->next_seqno = 1;
2830 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2835 * NB: This function is not allowed to fail. Doing so would mean that the
2836 * request is not being tracked for completion but the work itself is
2837 * going to happen on the hardware. This would be a Bad Thing(tm).
2839 void __i915_add_request(struct drm_i915_gem_request *request,
2840 struct drm_i915_gem_object *obj,
2843 struct intel_engine_cs *engine;
2844 struct drm_i915_private *dev_priv;
2845 struct intel_ringbuffer *ringbuf;
2850 if (WARN_ON(request == NULL))
2853 engine = request->engine;
2854 dev_priv = request->i915;
2855 ringbuf = request->ringbuf;
2858 * To ensure that this call will not fail, space for its emissions
2859 * should already have been reserved in the ring buffer. Let the ring
2860 * know that it is time to use that space up.
2862 request_start = intel_ring_get_tail(ringbuf);
2863 reserved_tail = request->reserved_space;
2864 request->reserved_space = 0;
2867 * Emit any outstanding flushes - execbuf can fail to emit the flush
2868 * after having emitted the batchbuffer command. Hence we need to fix
2869 * things up similar to emitting the lazy request. The difference here
2870 * is that the flush _must_ happen before the next request, no matter
2874 if (i915.enable_execlists)
2875 ret = logical_ring_flush_all_caches(request);
2877 ret = intel_ring_flush_all_caches(request);
2878 /* Not allowed to fail! */
2879 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
2882 trace_i915_gem_request_add(request);
2884 request->head = request_start;
2886 /* Whilst this request exists, batch_obj will be on the
2887 * active_list, and so will hold the active reference. Only when this
2888 * request is retired will the batch_obj be moved onto the
2889 * inactive_list and lose its active reference. Hence we do not need
2890 * to explicitly hold another reference here.
2892 request->batch_obj = obj;
2894 /* Seal the request and mark it as pending execution. Note that
2895 * we may inspect this state, without holding any locks, during
2896 * hangcheck. Hence we apply the barrier to ensure that we do not
2897 * see a more recent value in the hws than we are tracking.
2899 request->emitted_jiffies = jiffies;
2900 request->previous_seqno = engine->last_submitted_seqno;
2901 smp_store_mb(engine->last_submitted_seqno, request->seqno);
2902 list_add_tail(&request->list, &engine->request_list);
2904 /* Record the position of the start of the request so that
2905 * should we detect the updated seqno part-way through the
2906 * GPU processing the request, we never over-estimate the
2907 * position of the head.
2909 request->postfix = intel_ring_get_tail(ringbuf);
2911 if (i915.enable_execlists)
2912 ret = engine->emit_request(request);
2914 ret = engine->add_request(request);
2916 request->tail = intel_ring_get_tail(ringbuf);
2918 /* Not allowed to fail! */
2919 WARN(ret, "emit|add_request failed: %d!\n", ret);
2921 i915_queue_hangcheck(engine->i915);
2923 queue_delayed_work(dev_priv->wq,
2924 &dev_priv->mm.retire_work,
2925 round_jiffies_up_relative(HZ));
2926 intel_mark_busy(dev_priv);
2928 /* Sanity check that the reserved size was large enough. */
2929 ret = intel_ring_get_tail(ringbuf) - request_start;
2931 ret += ringbuf->size;
2932 WARN_ONCE(ret > reserved_tail,
2933 "Not enough space reserved (%d bytes) "
2934 "for adding the request (%d bytes)\n",
2935 reserved_tail, ret);
2938 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2939 const struct i915_gem_context *ctx)
2941 unsigned long elapsed;
2943 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2945 if (ctx->hang_stats.banned)
2948 if (ctx->hang_stats.ban_period_seconds &&
2949 elapsed <= ctx->hang_stats.ban_period_seconds) {
2950 if (!i915_gem_context_is_default(ctx)) {
2951 DRM_DEBUG("context hanging too fast, banning!\n");
2953 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2954 if (i915_stop_ring_allow_warn(dev_priv))
2955 DRM_ERROR("gpu hanging too fast, banning!\n");
2963 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2964 struct i915_gem_context *ctx,
2967 struct i915_ctx_hang_stats *hs;
2972 hs = &ctx->hang_stats;
2975 hs->banned = i915_context_is_banned(dev_priv, ctx);
2977 hs->guilty_ts = get_seconds();
2979 hs->batch_pending++;
2983 void i915_gem_request_free(struct kref *req_ref)
2985 struct drm_i915_gem_request *req = container_of(req_ref,
2987 kmem_cache_free(req->i915->requests, req);
2991 __i915_gem_request_alloc(struct intel_engine_cs *engine,
2992 struct i915_gem_context *ctx,
2993 struct drm_i915_gem_request **req_out)
2995 struct drm_i915_private *dev_priv = engine->i915;
2996 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
2997 struct drm_i915_gem_request *req;
3005 /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
3006 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
3009 ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
3013 req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
3017 ret = i915_gem_get_seqno(engine->i915, &req->seqno);
3021 kref_init(&req->ref);
3022 req->i915 = dev_priv;
3023 req->engine = engine;
3024 req->reset_counter = reset_counter;
3026 i915_gem_context_reference(req->ctx);
3029 * Reserve space in the ring buffer for all the commands required to
3030 * eventually emit this request. This is to guarantee that the
3031 * i915_add_request() call can't fail. Note that the reserve may need
3032 * to be redone if the request is not actually submitted straight
3033 * away, e.g. because a GPU scheduler has deferred it.
3035 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
3037 if (i915.enable_execlists)
3038 ret = intel_logical_ring_alloc_request_extras(req);
3040 ret = intel_ring_alloc_request_extras(req);
3048 i915_gem_context_unreference(ctx);
3050 kmem_cache_free(dev_priv->requests, req);
3055 * i915_gem_request_alloc - allocate a request structure
3057 * @engine: engine that we wish to issue the request on.
3058 * @ctx: context that the request will be associated with.
3059 * This can be NULL if the request is not directly related to
3060 * any specific user context, in which case this function will
3061 * choose an appropriate context to use.
3063 * Returns a pointer to the allocated request if successful,
3064 * or an error code if not.
3066 struct drm_i915_gem_request *
3067 i915_gem_request_alloc(struct intel_engine_cs *engine,
3068 struct i915_gem_context *ctx)
3070 struct drm_i915_gem_request *req;
3074 ctx = engine->i915->kernel_context;
3075 err = __i915_gem_request_alloc(engine, ctx, &req);
3076 return err ? ERR_PTR(err) : req;
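/*
 * A minimal sketch of the request lifecycle built on the helper above
 * (error handling trimmed, names other than the i915 functions are
 * illustrative): allocate, emit work, then seal the request so the
 * tracking described in __i915_add_request() cannot be skipped.
 *
 *	struct drm_i915_gem_request *req;
 *
 *	req = i915_gem_request_alloc(engine, NULL);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	// ... emit commands into req's ringbuffer ...
 *	i915_add_request(req);	// or i915_add_request_no_flush(), as used below
 */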
3079 struct drm_i915_gem_request *
3080 i915_gem_find_active_request(struct intel_engine_cs *engine)
3082 struct drm_i915_gem_request *request;
3084 list_for_each_entry(request, &engine->request_list, list) {
3085 if (i915_gem_request_completed(request, false))
3094 static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
3095 struct intel_engine_cs *engine)
3097 struct drm_i915_gem_request *request;
3100 request = i915_gem_find_active_request(engine);
3102 if (request == NULL)
3105 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
3107 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
3109 list_for_each_entry_continue(request, &engine->request_list, list)
3110 i915_set_reset_status(dev_priv, request->ctx, false);
3113 static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
3114 struct intel_engine_cs *engine)
3116 struct intel_ringbuffer *buffer;
3118 while (!list_empty(&engine->active_list)) {
3119 struct drm_i915_gem_object *obj;
3121 obj = list_first_entry(&engine->active_list,
3122 struct drm_i915_gem_object,
3123 engine_list[engine->id]);
3125 i915_gem_object_retire__read(obj, engine->id);
3129 * Clear the execlists queue up before freeing the requests, as those
3130 * are the ones that keep the context and ringbuffer backing objects
3134 if (i915.enable_execlists) {
3135 /* Ensure irq handler finishes or is cancelled. */
3136 tasklet_kill(&engine->irq_tasklet);
3138 intel_execlists_cancel_requests(engine);
3142 * We must free the requests after all the corresponding objects have
3143 * been moved off active lists. Which is the same order as the normal
3144 * retire_requests function does. This is important if objects hold
3145 * implicit references on things such as ppgtt address spaces through
3148 while (!list_empty(&engine->request_list)) {
3149 struct drm_i915_gem_request *request;
3151 request = list_first_entry(&engine->request_list,
3152 struct drm_i915_gem_request,
3155 i915_gem_request_retire(request);
3158 /* Having flushed all requests from all queues, we know that all
3159 * ringbuffers must now be empty. However, since we do not reclaim
3160 * all space when retiring the request (to prevent HEADs colliding
3161 * with rapid ringbuffer wraparound) the amount of available space
3162 * upon reset is less than when we start. Do one more pass over
3163 * all the ringbuffers to reset last_retired_head.
3165 list_for_each_entry(buffer, &engine->buffers, link) {
3166 buffer->last_retired_head = buffer->tail;
3167 intel_ring_update_space(buffer);
3170 intel_ring_init_seqno(engine, engine->last_submitted_seqno);
3173 void i915_gem_reset(struct drm_device *dev)
3175 struct drm_i915_private *dev_priv = dev->dev_private;
3176 struct intel_engine_cs *engine;
3179 * Before we free the objects from the requests, we need to inspect
3180 * them for finding the guilty party. As the requests only borrow
3181 * their reference to the objects, the inspection must be done first.
3183 for_each_engine(engine, dev_priv)
3184 i915_gem_reset_engine_status(dev_priv, engine);
3186 for_each_engine(engine, dev_priv)
3187 i915_gem_reset_engine_cleanup(dev_priv, engine);
3189 i915_gem_context_reset(dev);
3191 i915_gem_restore_fences(dev);
3193 WARN_ON(i915_verify_lists(dev));
3197 * This function clears the request list as sequence numbers are passed.
3198 * @engine: engine to retire requests on
3201 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
3203 WARN_ON(i915_verify_lists(engine->dev));
3205 /* Retire requests first as we use it above for the early return.
3206 * If we retire requests last, we may use a later seqno and so clear
3207 * the requests lists without clearing the active list, leading to
3210 while (!list_empty(&engine->request_list)) {
3211 struct drm_i915_gem_request *request;
3213 request = list_first_entry(&engine->request_list,
3214 struct drm_i915_gem_request,
3217 if (!i915_gem_request_completed(request, true))
3220 i915_gem_request_retire(request);
3223 /* Move any buffers on the active list that are no longer referenced
3224 * by the ringbuffer to the flushing/inactive lists as appropriate,
3225 * before we free the context associated with the requests.
3227 while (!list_empty(&engine->active_list)) {
3228 struct drm_i915_gem_object *obj;
3230 obj = list_first_entry(&engine->active_list,
3231 struct drm_i915_gem_object,
3232 engine_list[engine->id]);
3234 if (!list_empty(&obj->last_read_req[engine->id]->list))
3237 i915_gem_object_retire__read(obj, engine->id);
3240 if (unlikely(engine->trace_irq_req &&
3241 i915_gem_request_completed(engine->trace_irq_req, true))) {
3242 engine->irq_put(engine);
3243 i915_gem_request_assign(&engine->trace_irq_req, NULL);
3246 WARN_ON(i915_verify_lists(engine->dev));
3250 i915_gem_retire_requests(struct drm_i915_private *dev_priv)
3252 struct intel_engine_cs *engine;
3255 for_each_engine(engine, dev_priv) {
3256 i915_gem_retire_requests_ring(engine);
3257 idle &= list_empty(&engine->request_list);
3258 if (i915.enable_execlists) {
3259 spin_lock_bh(&engine->execlist_lock);
3260 idle &= list_empty(&engine->execlist_queue);
3261 spin_unlock_bh(&engine->execlist_lock);
3266 mod_delayed_work(dev_priv->wq,
3267 &dev_priv->mm.idle_work,
3268 msecs_to_jiffies(100));
3274 i915_gem_retire_work_handler(struct work_struct *work)
3276 struct drm_i915_private *dev_priv =
3277 container_of(work, typeof(*dev_priv), mm.retire_work.work);
3278 struct drm_device *dev = dev_priv->dev;
3281 /* Come back later if the device is busy... */
3283 if (mutex_trylock(&dev->struct_mutex)) {
3284 idle = i915_gem_retire_requests(dev_priv);
3285 mutex_unlock(&dev->struct_mutex);
3288 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
3289 round_jiffies_up_relative(HZ));
3293 i915_gem_idle_work_handler(struct work_struct *work)
3295 struct drm_i915_private *dev_priv =
3296 container_of(work, typeof(*dev_priv), mm.idle_work.work);
3297 struct drm_device *dev = dev_priv->dev;
3298 struct intel_engine_cs *engine;
3300 for_each_engine(engine, dev_priv)
3301 if (!list_empty(&engine->request_list))
3304 /* we probably should sync with hangcheck here, using cancel_work_sync.
3305 * Also locking seems to be fubar here, engine->request_list is protected
3306 * by dev->struct_mutex. */
3308 intel_mark_idle(dev_priv);
3310 if (mutex_trylock(&dev->struct_mutex)) {
3311 for_each_engine(engine, dev_priv)
3312 i915_gem_batch_pool_fini(&engine->batch_pool);
3314 mutex_unlock(&dev->struct_mutex);
3319 * Ensures that an object will eventually get non-busy by flushing any required
3320 * write domains, emitting any outstanding lazy request and retiring any
3321 * completed requests.
3322 * @obj: object to flush
3325 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3332 for (i = 0; i < I915_NUM_ENGINES; i++) {
3333 struct drm_i915_gem_request *req;
3335 req = obj->last_read_req[i];
3339 if (i915_gem_request_completed(req, true))
3340 i915_gem_object_retire__read(obj, i);
3347 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3348 * @dev: drm device pointer
3349 * @data: ioctl data blob
3350 * @file: drm file pointer
3352 * Returns 0 if successful, else an error is returned with the remaining time in
3353 * the timeout parameter.
3354 * -ETIME: object is still busy after timeout
3355 * -ERESTARTSYS: signal interrupted the wait
3356 * -ENOENT: object doesn't exist
3357 * Also possible, but rare:
3358 * -EAGAIN: GPU wedged
3360 * -ENODEV: Internal IRQ fail
3361 * -E?: The add request failed
3363 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3364 * non-zero timeout parameter the wait ioctl will wait for the given number of
3365 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3366 * without holding struct_mutex the object may become re-busied before this
3367 * function completes. A similar but shorter race condition exists in the busy
3371 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3373 struct drm_i915_gem_wait *args = data;
3374 struct drm_i915_gem_object *obj;
3375 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3379 if (args->flags != 0)
3382 ret = i915_mutex_lock_interruptible(dev);
3386 obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
3387 if (&obj->base == NULL) {
3388 mutex_unlock(&dev->struct_mutex);
3392 /* Need to make sure the object gets inactive eventually. */
3393 ret = i915_gem_object_flush_active(obj);
3400 /* Do this after OLR check to make sure we make forward progress polling
3401 * on this IOCTL with a timeout == 0 (like busy ioctl)
3403 if (args->timeout_ns == 0) {
3408 drm_gem_object_unreference(&obj->base);
3410 for (i = 0; i < I915_NUM_ENGINES; i++) {
3411 if (obj->last_read_req[i] == NULL)
3414 req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3417 mutex_unlock(&dev->struct_mutex);
3419 for (i = 0; i < n; i++) {
3421 ret = __i915_wait_request(req[i], true,
3422 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3423 to_rps_client(file));
3424 i915_gem_request_unreference(req[i]);
3429 drm_gem_object_unreference(&obj->base);
3430 mutex_unlock(&dev->struct_mutex);
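/*
 * Illustrative userspace sketch (not part of the driver) of the behaviour
 * described above: a timeout of 0 merely polls, a positive timeout blocks.
 * Assumes an open DRM fd, a GEM handle and the uapi definitions from
 * <drm/i915_drm.h>.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,	// poll; e.g. 10000000 to wait up to 10ms
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		;	// object is idle
 *	else if (errno == ETIME)
 *		;	// object still busy after the timeout
 */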
3435 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3436 struct intel_engine_cs *to,
3437 struct drm_i915_gem_request *from_req,
3438 struct drm_i915_gem_request **to_req)
3440 struct intel_engine_cs *from;
3443 from = i915_gem_request_get_engine(from_req);
3447 if (i915_gem_request_completed(from_req, true))
3450 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
3451 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3452 ret = __i915_wait_request(from_req,
3453 i915->mm.interruptible,
3455 &i915->rps.semaphores);
3459 i915_gem_object_retire_request(obj, from_req);
3461 int idx = intel_ring_sync_index(from, to);
3462 u32 seqno = i915_gem_request_get_seqno(from_req);
3466 if (seqno <= from->semaphore.sync_seqno[idx])
3469 if (*to_req == NULL) {
3470 struct drm_i915_gem_request *req;
3472 req = i915_gem_request_alloc(to, NULL);
3474 return PTR_ERR(req);
3479 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3480 ret = to->semaphore.sync_to(*to_req, from, seqno);
3484 /* We use last_read_req because sync_to()
3485 * might have just caused seqno wrap under
3488 from->semaphore.sync_seqno[idx] =
3489 i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3496 * i915_gem_object_sync - sync an object to a ring.
3498 * @obj: object which may be in use on another ring.
3499 * @to: ring we wish to use the object on. May be NULL.
3500 * @to_req: request we wish to use the object for. See below.
3501 * This will be allocated and returned if a request is
3502 * required but not passed in.
3504 * This code is meant to abstract object synchronization with the GPU.
3505 * Calling with NULL implies synchronizing the object with the CPU
3506 * rather than a particular GPU ring. Conceptually we serialise writes
3507 * between engines inside the GPU. We only allow one engine to write
3508 * into a buffer at any time, but multiple readers. To ensure each has
3509 * a coherent view of memory, we must:
3511 * - If there is an outstanding write request to the object, the new
3512 * request must wait for it to complete (either CPU or in hw, requests
3513 * on the same ring will be naturally ordered).
3515 * - If we are a write request (pending_write_domain is set), the new
3516 * request must wait for outstanding read requests to complete.
3518 * For CPU synchronisation (NULL to) no request is required. For syncing with
3519 * rings to_req must be non-NULL. However, a request does not have to be
3520 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
3521 * request will be allocated automatically and returned through *to_req. Note
3522 * that it is not guaranteed that commands will be emitted (because the system
3523 * might already be idle). Hence there is no need to create a request that
3524 * might never have any work submitted. Note further that if a request is
3525 * returned in *to_req, it is the responsibility of the caller to submit
3526 * that request (after potentially adding more work to it).
3528 * Returns 0 if successful, else propagates up the lower layer error.
3531 i915_gem_object_sync(struct drm_i915_gem_object *obj,
3532 struct intel_engine_cs *to,
3533 struct drm_i915_gem_request **to_req)
3535 const bool readonly = obj->base.pending_write_domain == 0;
3536 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3543 return i915_gem_object_wait_rendering(obj, readonly);
3547 if (obj->last_write_req)
3548 req[n++] = obj->last_write_req;
3550 for (i = 0; i < I915_NUM_ENGINES; i++)
3551 if (obj->last_read_req[i])
3552 req[n++] = obj->last_read_req[i];
3554 for (i = 0; i < n; i++) {
3555 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
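/*
 * A minimal caller-side sketch of the to_req contract documented above
 * (names other than the i915 functions are illustrative): if semaphore
 * commands had to be emitted, a request is handed back through *to_req and
 * it is the caller's responsibility to submit it.
 *
 *	struct drm_i915_gem_request *to_req = NULL;
 *
 *	ret = i915_gem_object_sync(obj, to, &to_req);
 *	if (ret)
 *		return ret;
 *	if (to_req)
 *		i915_add_request_no_flush(to_req);
 */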
3563 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3565 u32 old_write_domain, old_read_domains;
3567 /* Force a pagefault for domain tracking on next user access */
3568 i915_gem_release_mmap(obj);
3570 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3573 old_read_domains = obj->base.read_domains;
3574 old_write_domain = obj->base.write_domain;
3576 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3577 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3579 trace_i915_gem_object_change_domain(obj,
3584 static void __i915_vma_iounmap(struct i915_vma *vma)
3586 GEM_BUG_ON(vma->pin_count);
3588 if (vma->iomap == NULL)
3591 io_mapping_unmap(vma->iomap);
3595 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3597 struct drm_i915_gem_object *obj = vma->obj;
3598 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3601 if (list_empty(&vma->obj_link))
3604 if (!drm_mm_node_allocated(&vma->node)) {
3605 i915_gem_vma_destroy(vma);
3612 BUG_ON(obj->pages == NULL);
3615 ret = i915_gem_object_wait_rendering(obj, false);
3620 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3621 i915_gem_object_finish_gtt(obj);
3623 /* release the fence reg _after_ flushing */
3624 ret = i915_gem_object_put_fence(obj);
3628 __i915_vma_iounmap(vma);
3631 trace_i915_vma_unbind(vma);
3633 vma->vm->unbind_vma(vma);
3636 list_del_init(&vma->vm_link);
3638 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3639 obj->map_and_fenceable = false;
3640 } else if (vma->ggtt_view.pages) {
3641 sg_free_table(vma->ggtt_view.pages);
3642 kfree(vma->ggtt_view.pages);
3644 vma->ggtt_view.pages = NULL;
3647 drm_mm_remove_node(&vma->node);
3648 i915_gem_vma_destroy(vma);
3650 /* Since the unbound list is global, only move to that list if
3651 * no more VMAs exist. */
3652 if (list_empty(&obj->vma_list))
3653 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3655 /* And finally now the object is completely decoupled from this vma,
3656 * we can drop its hold on the backing storage and allow it to be
3657 * reaped by the shrinker.
3659 i915_gem_object_unpin_pages(obj);
3664 int i915_vma_unbind(struct i915_vma *vma)
3666 return __i915_vma_unbind(vma, true);
3669 int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3671 return __i915_vma_unbind(vma, false);
3674 int i915_gpu_idle(struct drm_device *dev)
3676 struct drm_i915_private *dev_priv = dev->dev_private;
3677 struct intel_engine_cs *engine;
3680 /* Flush everything onto the inactive list. */
3681 for_each_engine(engine, dev_priv) {
3682 if (!i915.enable_execlists) {
3683 struct drm_i915_gem_request *req;
3685 req = i915_gem_request_alloc(engine, NULL);
3687 return PTR_ERR(req);
3689 ret = i915_switch_context(req);
3690 i915_add_request_no_flush(req);
3695 ret = intel_engine_idle(engine);
3700 WARN_ON(i915_verify_lists(dev));
3704 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3705 unsigned long cache_level)
3707 struct drm_mm_node *gtt_space = &vma->node;
3708 struct drm_mm_node *other;
3711 * On some machines we have to be careful when putting differing types
3712 * of snoopable memory together to avoid the prefetcher crossing memory
3713 * domains and dying. During vm initialisation, we decide whether or not
3714 * these constraints apply and set the drm_mm.color_adjust
3717 if (vma->vm->mm.color_adjust == NULL)
3720 if (!drm_mm_node_allocated(gtt_space))
3723 if (list_empty(&gtt_space->node_list))
3726 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3727 if (other->allocated && !other->hole_follows && other->color != cache_level)
3730 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3731 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3738 * Finds free space in the GTT aperture and binds the object or a view of it
3740 * @obj: object to bind
3741 * @vm: address space to bind into
3742 * @ggtt_view: global gtt view if applicable
3743 * @alignment: requested alignment
3744 * @flags: mask of PIN_* flags to use
3746 static struct i915_vma *
3747 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3748 struct i915_address_space *vm,
3749 const struct i915_ggtt_view *ggtt_view,
3753 struct drm_device *dev = obj->base.dev;
3754 struct drm_i915_private *dev_priv = to_i915(dev);
3755 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3756 u32 fence_alignment, unfenced_alignment;
3757 u32 search_flag, alloc_flag;
3759 u64 size, fence_size;
3760 struct i915_vma *vma;
3763 if (i915_is_ggtt(vm)) {
3766 if (WARN_ON(!ggtt_view))
3767 return ERR_PTR(-EINVAL);
3769 view_size = i915_ggtt_view_size(obj, ggtt_view);
3771 fence_size = i915_gem_get_gtt_size(dev,
3774 fence_alignment = i915_gem_get_gtt_alignment(dev,
3778 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
3782 size = flags & PIN_MAPPABLE ? fence_size : view_size;
3784 fence_size = i915_gem_get_gtt_size(dev,
3787 fence_alignment = i915_gem_get_gtt_alignment(dev,
3791 unfenced_alignment =
3792 i915_gem_get_gtt_alignment(dev,
3796 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3799 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3801 if (flags & PIN_MAPPABLE)
3802 end = min_t(u64, end, ggtt->mappable_end);
3803 if (flags & PIN_ZONE_4G)
3804 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3807 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3809 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3810 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3811 ggtt_view ? ggtt_view->type : 0,
3813 return ERR_PTR(-EINVAL);
3816 /* If binding the object/GGTT view requires more space than the entire
3817 * aperture has, reject it early before evicting everything in a vain
3818 * attempt to find space.
3821 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
3822 ggtt_view ? ggtt_view->type : 0,
3824 flags & PIN_MAPPABLE ? "mappable" : "total",
3826 return ERR_PTR(-E2BIG);
3829 ret = i915_gem_object_get_pages(obj);
3831 return ERR_PTR(ret);
3833 i915_gem_object_pin_pages(obj);
3835 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3836 i915_gem_obj_lookup_or_create_vma(obj, vm);
3841 if (flags & PIN_OFFSET_FIXED) {
3842 uint64_t offset = flags & PIN_OFFSET_MASK;
3844 if (offset & (alignment - 1) || offset + size > end) {
3848 vma->node.start = offset;
3849 vma->node.size = size;
3850 vma->node.color = obj->cache_level;
3851 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3853 ret = i915_gem_evict_for_vma(vma);
3855 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3860 if (flags & PIN_HIGH) {
3861 search_flag = DRM_MM_SEARCH_BELOW;
3862 alloc_flag = DRM_MM_CREATE_TOP;
3864 search_flag = DRM_MM_SEARCH_DEFAULT;
3865 alloc_flag = DRM_MM_CREATE_DEFAULT;
3869 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3876 ret = i915_gem_evict_something(dev, vm, size, alignment,
3886 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3888 goto err_remove_node;
3891 trace_i915_vma_bind(vma, flags);
3892 ret = i915_vma_bind(vma, obj->cache_level, flags);
3894 goto err_remove_node;
3896 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3897 list_add_tail(&vma->vm_link, &vm->inactive_list);
3902 drm_mm_remove_node(&vma->node);
3904 i915_gem_vma_destroy(vma);
3907 i915_gem_object_unpin_pages(obj);
3912 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3915 /* If we don't have a page list set up, then we're not pinned
3916 * to GPU, and we can ignore the cache flush because it'll happen
3917 * again at bind time.
3919 if (obj->pages == NULL)
3923 * Stolen memory is always coherent with the GPU as it is explicitly
3924 * marked as wc by the system, or the system is cache-coherent.
3926 if (obj->stolen || obj->phys_handle)
3929 /* If the GPU is snooping the contents of the CPU cache,
3930 * we do not need to manually clear the CPU cache lines. However,
3931 * the caches are only snooped when the render cache is
3932 * flushed/invalidated. As we always have to emit invalidations
3933 * and flushes when moving into and out of the RENDER domain, correct
3934 * snooping behaviour occurs naturally as the result of our domain
3937 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3938 obj->cache_dirty = true;
3942 trace_i915_gem_object_clflush(obj);
3943 drm_clflush_sg(obj->pages);
3944 obj->cache_dirty = false;
3949 /** Flushes the GTT write domain for the object if it's dirty. */
3951 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3953 uint32_t old_write_domain;
3955 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3958 /* No actual flushing is required for the GTT write domain. Writes
3959 * to it immediately go to main memory as far as we know, so there's
3960 * no chipset flush. It also doesn't land in render cache.
3962 * However, we do have to enforce the order so that all writes through
3963 * the GTT land before any writes to the device, such as updates to
3968 old_write_domain = obj->base.write_domain;
3969 obj->base.write_domain = 0;
3971 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3973 trace_i915_gem_object_change_domain(obj,
3974 obj->base.read_domains,
3978 /** Flushes the CPU write domain for the object if it's dirty. */
3980 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3982 uint32_t old_write_domain;
3984 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3987 if (i915_gem_clflush_object(obj, obj->pin_display))
3988 i915_gem_chipset_flush(to_i915(obj->base.dev));
3990 old_write_domain = obj->base.write_domain;
3991 obj->base.write_domain = 0;
3993 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3995 trace_i915_gem_object_change_domain(obj,
3996 obj->base.read_domains,
4001 * Moves a single object to the GTT read, and possibly write domain.
4002 * @obj: object to act on
4003 * @write: ask for write access or read only
4005 * This function returns when the move is complete, including waiting on
4009 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
4011 struct drm_device *dev = obj->base.dev;
4012 struct drm_i915_private *dev_priv = to_i915(dev);
4013 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4014 uint32_t old_write_domain, old_read_domains;
4015 struct i915_vma *vma;
4018 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
4021 ret = i915_gem_object_wait_rendering(obj, !write);
4025 /* Flush and acquire obj->pages so that we are coherent through
4026 * direct access in memory with previous cached writes through
4027 * shmemfs and that our cache domain tracking remains valid.
4028 * For example, if the obj->filp was moved to swap without us
4029 * being notified and releasing the pages, we would mistakenly
4030 * continue to assume that the obj remained out of the CPU cached
4033 ret = i915_gem_object_get_pages(obj);
4037 i915_gem_object_flush_cpu_write_domain(obj);
4039 /* Serialise direct access to this object with the barriers for
4040 * coherent writes from the GPU, by effectively invalidating the
4041 * GTT domain upon first access.
4043 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
4046 old_write_domain = obj->base.write_domain;
4047 old_read_domains = obj->base.read_domains;
4049 /* It should now be out of any other write domains, and we can update
4050 * the domain values for our changes.
4052 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
4053 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4055 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
4056 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
4060 trace_i915_gem_object_change_domain(obj,
4064 /* And bump the LRU for this access */
4065 vma = i915_gem_obj_to_ggtt(obj);
4066 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
4067 list_move_tail(&vma->vm_link,
4068 &ggtt->base.inactive_list);
4074 * Changes the cache-level of an object across all VMA.
4075 * @obj: object to act on
4076 * @cache_level: new cache level to set for the object
4078 * After this function returns, the object will be in the new cache-level
4079 * across all GTT and the contents of the backing storage will be coherent,
4080 * with respect to the new cache-level. In order to keep the backing storage
4081 * coherent for all users, we only allow a single cache level to be set
4082 * globally on the object and prevent it from being changed whilst the
4083 * hardware is reading from the object. That is if the object is currently
4084 * on the scanout it will be set to uncached (or equivalent display
4085 * cache coherency) and all non-MOCS GPU access will also be uncached so
4086 * that all direct access to the scanout remains coherent.
4088 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
4089 enum i915_cache_level cache_level)
4091 struct drm_device *dev = obj->base.dev;
4092 struct i915_vma *vma, *next;
4096 if (obj->cache_level == cache_level)
4099 /* Inspect the list of currently bound VMA and unbind any that would
4100 * be invalid given the new cache-level. This is principally to
4101 * catch the issue of the CS prefetch crossing page boundaries and
4102 * reading an invalid PTE on older architectures.
4104 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4105 if (!drm_mm_node_allocated(&vma->node))
4108 if (vma->pin_count) {
4109 DRM_DEBUG("can not change the cache level of pinned objects\n");
4113 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
4114 ret = i915_vma_unbind(vma);
4121 /* We can reuse the existing drm_mm nodes but need to change the
4122 * cache-level on the PTE. We could simply unbind them all and
4123 * rebind with the correct cache-level on next use. However since
4124 * we already have a valid slot, dma mapping, pages etc, we may as well
4125 * rewrite the PTE in the belief that doing so tramples upon less
4126 * state and so involves less work.
4129 /* Before we change the PTE, the GPU must not be accessing it.
4130 * If we wait upon the object, we know that all the bound
4131 * VMA are no longer active.
4133 ret = i915_gem_object_wait_rendering(obj, false);
4137 if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
4138 /* Access to snoopable pages through the GTT is
4139 * incoherent and on some machines causes a hard
4140 * lockup. Relinquish the CPU mmapping to force
4141 * userspace to refault in the pages and we can
4142 * then double check if the GTT mapping is still
4143 * valid for that pointer access.
4145 i915_gem_release_mmap(obj);
4147 /* As we no longer need a fence for GTT access,
4148 * we can relinquish it now (and so prevent having
4149 * to steal a fence from someone else on the next
4150 * fence request). Note GPU activity would have
4151 * dropped the fence as all snoopable access is
4152 * supposed to be linear.
4154 ret = i915_gem_object_put_fence(obj);
4158 /* We either have incoherent backing store and
4159 * so no GTT access or the architecture is fully
4160 * coherent. In such cases, existing GTT mmaps
4161 * ignore the cache bit in the PTE and we can
4162 * rewrite it without confusing the GPU or having
4163 * to force userspace to fault back in its mmaps.
4167 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4168 if (!drm_mm_node_allocated(&vma->node))
4171 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
4177 list_for_each_entry(vma, &obj->vma_list, obj_link)
4178 vma->node.color = cache_level;
4179 obj->cache_level = cache_level;
4182 /* Flush the dirty CPU caches to the backing storage so that the
4183 * object is now coherent at its new cache level (with respect
4184 * to the access domain).
4186 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
4187 if (i915_gem_clflush_object(obj, true))
4188 i915_gem_chipset_flush(to_i915(obj->base.dev));
4194 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
4195 struct drm_file *file)
4197 struct drm_i915_gem_caching *args = data;
4198 struct drm_i915_gem_object *obj;
4200 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4201 if (&obj->base == NULL)
4204 switch (obj->cache_level) {
4205 case I915_CACHE_LLC:
4206 case I915_CACHE_L3_LLC:
4207 args->caching = I915_CACHING_CACHED;
4211 args->caching = I915_CACHING_DISPLAY;
4215 args->caching = I915_CACHING_NONE;
4219 drm_gem_object_unreference_unlocked(&obj->base);
4223 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
4224 struct drm_file *file)
4226 struct drm_i915_private *dev_priv = dev->dev_private;
4227 struct drm_i915_gem_caching *args = data;
4228 struct drm_i915_gem_object *obj;
4229 enum i915_cache_level level;
4232 switch (args->caching) {
4233 case I915_CACHING_NONE:
4234 level = I915_CACHE_NONE;
4236 case I915_CACHING_CACHED:
4238 * Due to a HW issue on BXT A stepping, GPU stores via a
4239 * snooped mapping may leave stale data in a corresponding CPU
4240 * cacheline, whereas normally such cachelines would get
4243 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
4246 level = I915_CACHE_LLC;
4248 case I915_CACHING_DISPLAY:
4249 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
4255 intel_runtime_pm_get(dev_priv);
4257 ret = i915_mutex_lock_interruptible(dev);
4261 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4262 if (&obj->base == NULL) {
4267 ret = i915_gem_object_set_cache_level(obj, level);
4269 drm_gem_object_unreference(&obj->base);
4271 mutex_unlock(&dev->struct_mutex);
4273 intel_runtime_pm_put(dev_priv);
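/*
 * Illustrative userspace sketch (not part of the driver) for the ioctl
 * above. Assumes an open DRM fd, a GEM handle and the uapi definitions
 * from <drm/i915_drm.h>.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,	// or _NONE / _DISPLAY
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		return -errno;
 */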
4279 * Prepare buffer for display plane (scanout, cursors, etc).
4280 * Can be called from an uninterruptible phase (modesetting) and allows
4281 * any flushes to be pipelined (for pageflips).
4284 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4286 const struct i915_ggtt_view *view)
4288 u32 old_read_domains, old_write_domain;
4291 /* Mark the pin_display early so that we account for the
4292 * display coherency whilst setting up the cache domains.
4296 /* The display engine is not coherent with the LLC cache on gen6. As
4297 * a result, we make sure that the pinning that is about to occur is
4298 * done with uncached PTEs. This is lowest common denominator for all
4301 * However for gen6+, we could do better by using the GFDT bit instead
4302 * of uncaching, which would allow us to flush all the LLC-cached data
4303 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4305 ret = i915_gem_object_set_cache_level(obj,
4306 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
4308 goto err_unpin_display;
4310 /* As the user may map the buffer once pinned in the display plane
4311 * (e.g. libkms for the bootup splash), we have to ensure that we
4312 * always use map_and_fenceable for all scanout buffers.
4314 ret = i915_gem_object_ggtt_pin(obj, view, alignment,
4315 view->type == I915_GGTT_VIEW_NORMAL ?
4318 goto err_unpin_display;
4320 i915_gem_object_flush_cpu_write_domain(obj);
4322 old_write_domain = obj->base.write_domain;
4323 old_read_domains = obj->base.read_domains;
4325 /* It should now be out of any other write domains, and we can update
4326 * the domain values for our changes.
4328 obj->base.write_domain = 0;
4329 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4331 trace_i915_gem_object_change_domain(obj,
4343 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4344 const struct i915_ggtt_view *view)
4346 if (WARN_ON(obj->pin_display == 0))
4349 i915_gem_object_ggtt_unpin_view(obj, view);
4355 * Moves a single object to the CPU read, and possibly write domain.
4356 * @obj: object to act on
4357 * @write: requesting write or read-only access
4359 * This function returns when the move is complete, including waiting on
4363 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4365 uint32_t old_write_domain, old_read_domains;
4368 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4371 ret = i915_gem_object_wait_rendering(obj, !write);
4375 i915_gem_object_flush_gtt_write_domain(obj);
4377 old_write_domain = obj->base.write_domain;
4378 old_read_domains = obj->base.read_domains;
4380 /* Flush the CPU cache if it's still invalid. */
4381 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4382 i915_gem_clflush_object(obj, false);
4384 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4387 /* It should now be out of any other write domains, and we can update
4388 * the domain values for our changes.
4390 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4392 /* If we're writing through the CPU, then the GPU read domains will
4393 * need to be invalidated at next use.
4396 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4397 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4400 trace_i915_gem_object_change_domain(obj,
4407 /* Throttle our rendering by waiting until the ring has completed our requests
4408 * emitted over 20 msec ago.
4410 * Note that if we were to use the current jiffies each time around the loop,
4411 * we wouldn't escape the function with any frames outstanding if the time to
4412 * render a frame was over 20ms.
4414 * This should get us reasonable parallelism between CPU and GPU but also
4415 * relatively low latency when blocking on a particular request to finish.
4418 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4420 struct drm_i915_private *dev_priv = dev->dev_private;
4421 struct drm_i915_file_private *file_priv = file->driver_priv;
4422 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4423 struct drm_i915_gem_request *request, *target = NULL;
4426 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4430 /* ABI: return -EIO if already wedged */
4431 if (i915_terminally_wedged(&dev_priv->gpu_error))
4434 spin_lock(&file_priv->mm.lock);
4435 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4436 if (time_after_eq(request->emitted_jiffies, recent_enough))
4440 * Note that the request might not have been submitted yet, in which
4441 * case emitted_jiffies will be zero.
4443 if (!request->emitted_jiffies)
4449 i915_gem_request_reference(target);
4450 spin_unlock(&file_priv->mm.lock);
4455 ret = __i915_wait_request(target, true, NULL, NULL);
4457 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4459 i915_gem_request_unreference(target);
4465 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4467 struct drm_i915_gem_object *obj = vma->obj;
4470 vma->node.start & (alignment - 1))
4473 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4476 if (flags & PIN_OFFSET_BIAS &&
4477 vma->node.start < (flags & PIN_OFFSET_MASK))
4480 if (flags & PIN_OFFSET_FIXED &&
4481 vma->node.start != (flags & PIN_OFFSET_MASK))
4487 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
4489 struct drm_i915_gem_object *obj = vma->obj;
4490 bool mappable, fenceable;
4491 u32 fence_size, fence_alignment;
4493 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4496 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4501 fenceable = (vma->node.size == fence_size &&
4502 (vma->node.start & (fence_alignment - 1)) == 0);
4504 mappable = (vma->node.start + fence_size <=
4505 to_i915(obj->base.dev)->ggtt.mappable_end);
4507 obj->map_and_fenceable = mappable && fenceable;
4511 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4512 struct i915_address_space *vm,
4513 const struct i915_ggtt_view *ggtt_view,
4517 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4518 struct i915_vma *vma;
4522 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4525 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4528 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4531 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4534 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4535 i915_gem_obj_to_vma(obj, vm);
4538 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4541 if (i915_vma_misplaced(vma, alignment, flags)) {
4542 WARN(vma->pin_count,
4543 "bo is already pinned in %s with incorrect alignment:"
4544 " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
4545 " obj->map_and_fenceable=%d\n",
4546 ggtt_view ? "ggtt" : "ppgtt",
4547 upper_32_bits(vma->node.start),
4548 lower_32_bits(vma->node.start),
4550 !!(flags & PIN_MAPPABLE),
4551 obj->map_and_fenceable);
4552 ret = i915_vma_unbind(vma);
4560 bound = vma ? vma->bound : 0;
4561 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4562 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4565 return PTR_ERR(vma);
4567 ret = i915_vma_bind(vma, obj->cache_level, flags);
4572 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4573 (bound ^ vma->bound) & GLOBAL_BIND) {
4574 __i915_vma_set_map_and_fenceable(vma);
4575 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4583 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4584 struct i915_address_space *vm,
4588 return i915_gem_object_do_pin(obj, vm,
4589 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4594 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4595 const struct i915_ggtt_view *view,
4599 struct drm_device *dev = obj->base.dev;
4600 struct drm_i915_private *dev_priv = to_i915(dev);
4601 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4605 return i915_gem_object_do_pin(obj, &ggtt->base, view,
4606 alignment, flags | PIN_GLOBAL);
4610 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4611 const struct i915_ggtt_view *view)
4613 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4615 WARN_ON(vma->pin_count == 0);
4616 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4622 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4623 struct drm_file *file)
4625 struct drm_i915_gem_busy *args = data;
4626 struct drm_i915_gem_object *obj;
4629 ret = i915_mutex_lock_interruptible(dev);
4633 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4634 if (&obj->base == NULL) {
4639 /* Count all active objects as busy, even if they are currently not used
4640 * by the gpu. Users of this interface expect objects to eventually
4641 * become non-busy without any further actions, therefore emit any
4642 * necessary flushes here.
4644 ret = i915_gem_object_flush_active(obj);
4652 for (i = 0; i < I915_NUM_ENGINES; i++) {
4653 struct drm_i915_gem_request *req;
4655 req = obj->last_read_req[i];
4657 args->busy |= 1 << (16 + req->engine->exec_id);
4659 if (obj->last_write_req)
4660 args->busy |= obj->last_write_req->engine->exec_id;
4664 drm_gem_object_unreference(&obj->base);
4666 mutex_unlock(&dev->struct_mutex);
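/*
 * Illustrative userspace sketch (not part of the driver): decoding the busy
 * word built above. A value of zero means idle; otherwise the low 16 bits
 * hold the exec_id of the engine still writing (if any), and each engine
 * still reading sets one of the bits from 16 upwards, as done above.
 *
 *	struct drm_i915_gem_busy arg = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &arg))
 *		return -errno;
 *	if (arg.busy == 0)
 *		;	// idle, safe to access from the CPU
 *	else
 *		;	// still in use by the engines encoded in arg.busy
 */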
4671 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4672 struct drm_file *file_priv)
4674 return i915_gem_ring_throttle(dev, file_priv);
4678 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4679 struct drm_file *file_priv)
4681 struct drm_i915_private *dev_priv = dev->dev_private;
4682 struct drm_i915_gem_madvise *args = data;
4683 struct drm_i915_gem_object *obj;
4686 switch (args->madv) {
4687 case I915_MADV_DONTNEED:
4688 case I915_MADV_WILLNEED:
4694 ret = i915_mutex_lock_interruptible(dev);
4698 obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
4699 if (&obj->base == NULL) {
4704 if (i915_gem_obj_is_pinned(obj)) {
4710 obj->tiling_mode != I915_TILING_NONE &&
4711 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4712 if (obj->madv == I915_MADV_WILLNEED)
4713 i915_gem_object_unpin_pages(obj);
4714 if (args->madv == I915_MADV_WILLNEED)
4715 i915_gem_object_pin_pages(obj);
4718 if (obj->madv != __I915_MADV_PURGED)
4719 obj->madv = args->madv;
4721 /* if the object is no longer attached, discard its backing storage */
4722 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4723 i915_gem_object_truncate(obj);
4725 args->retained = obj->madv != __I915_MADV_PURGED;
4728 drm_gem_object_unreference(&obj->base);
4730 mutex_unlock(&dev->struct_mutex);
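/*
 * Illustrative userspace sketch (not part of the driver): marking a cached
 * buffer as purgeable and later checking whether its contents survived.
 * Assumes an open DRM fd, a GEM handle and <drm/i915_drm.h>.
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
 *		return -errno;
 *	// later, before reusing the buffer:
 *	arg.madv = I915_MADV_WILLNEED;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
 *		return -errno;
 *	if (!arg.retained)
 *		;	// backing storage was discarded; reinitialise contents
 */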
4734 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4735 const struct drm_i915_gem_object_ops *ops)
4739 INIT_LIST_HEAD(&obj->global_list);
4740 for (i = 0; i < I915_NUM_ENGINES; i++)
4741 INIT_LIST_HEAD(&obj->engine_list[i]);
4742 INIT_LIST_HEAD(&obj->obj_exec_link);
4743 INIT_LIST_HEAD(&obj->vma_list);
4744 INIT_LIST_HEAD(&obj->batch_pool_link);
4748 obj->fence_reg = I915_FENCE_REG_NONE;
4749 obj->madv = I915_MADV_WILLNEED;
4751 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4754 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4755 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4756 .get_pages = i915_gem_object_get_pages_gtt,
4757 .put_pages = i915_gem_object_put_pages_gtt,
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}
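/*
 * Final object teardown, below: every VMA is unbound first, any leftover
 * page pins are dropped, the backing storage is released (and discarded if
 * possible), the mmap offset is freed, and the bookkeeping is updated before
 * the object itself is returned to the slab cache.
 */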
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}
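/*
 * The helpers below look up an object's VMA by walking obj->vma_list:
 * i915_gem_obj_to_vma() finds the plain (normal view) binding in a given
 * address space, while i915_gem_obj_to_ggtt_view() matches a specific GGTT
 * view. Both return NULL when no such binding exists.
 */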
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}
void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	if (!vma->is_ggtt)
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	list_del(&vma->obj_link);

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
static void
i915_gem_stop_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.stop_engine(engine);
}
int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_stop_engines(dev);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	flush_delayed_work(&dev_priv->mm.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->mm.busy);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
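/*
 * i915_gem_init_swizzling (below) programs the hardware so that its tiled
 * accesses use the same bit-6 swizzling the driver detected for this memory
 * configuration; it is skipped entirely when no swizzling is in use or on
 * platforms older than gen5.
 */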
void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}
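/*
 * Writing zero to a ring's CTL/HEAD/TAIL/START registers, as done below,
 * leaves that ring idle (head == tail); per the comment in
 * i915_gem_init_hw(), this keeps stale unused rings from blocking C3 entry
 * on old platforms.
 */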
static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}
int i915_gem_init_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	return 0;

cleanup_vebox_ring:
	intel_cleanup_engine(&dev_priv->engine[VECS]);
cleanup_blt_ring:
	intel_cleanup_engine(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
	intel_cleanup_engine(&dev_priv->engine[VCS]);
cleanup_render_ring:
	intel_cleanup_engine(&dev_priv->engine[RCS]);

	return ret;
}
int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;

	/*
	 * Increment the next seqno by 0x100 so we have a visible break
	 * on re-initialisation
	 */
	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno + 0x100);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}
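/*
 * i915_gem_init (below) picks the submission backend before touching the
 * hardware: the legacy ringbuffer vfuncs are installed unless execlists are
 * enabled, in which case the logical ring variants are used instead.
 */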
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_engines = i915_gem_init_engines;
		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
		dev_priv->gt.stop_engine = intel_stop_engine;
	} else {
		dev_priv->gt.execbuf_submit = intel_execlists_submission;
		dev_priv->gt.init_engines = intel_logical_rings_init;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
		dev_priv->gt.stop_engine = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);
	i915_gem_init_ggtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = dev_priv->gt.init_engines(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
}
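/*
 * The number of hardware fence registers depends on the platform, as set up
 * below: 32 on gen7+ (except Valleyview and Cherryview), 16 on gen4+ and on
 * the 945G/945GM/G33 family, and 8 everywhere else. When running under a
 * vGPU, the hypervisor reports how many are actually available to us.
 */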
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}
void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/*
	 * Set initial sequence number for requests.
	 * Using this number allows the wraparound to happen early,
	 * catching any obvious problems.
	 */
	dev_priv->next_seqno = ((u32)~0 - 0x1100);
	dev_priv->last_seqno = ((u32)~0 - 0x1101);

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
}
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 */

	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_ring = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
/* All the new VM stuff */
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	GEM_BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;
	}

	return 0;
}

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(&obj->base);
	return ERR_PTR(ret);
}