/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}
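/*
 * Example of how the two helpers above combine: on an LLC platform
 * (HAS_LLC() true) every cache level is considered coherent, so CPU writes
 * normally need no clflush and cpu_write_needs_clflush() only returns true
 * for pinned display (scanout) buffers. On a non-LLC platform an
 * I915_CACHE_NONE object is not coherent, so CPU writes must be clflushed
 * (or routed through the GTT) before the GPU or display can see them.
 */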
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
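/*
 * These two helpers back the pread/pwrite GTT fallback paths below: when an
 * object cannot be pinned into the mappable aperture as a whole, a single
 * page-sized GGTT node is reserved and each page of the object is mapped
 * into it in turn with ggtt->base.insert_page() before being copied through
 * the CPU's aperture mapping.
 */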
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}
129 int i915_mutex_lock_interruptible(struct drm_device *dev)
131 struct drm_i915_private *dev_priv = to_i915(dev);
134 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
138 ret = mutex_lock_interruptible(&dev->struct_mutex);
142 WARN_ON(i915_verify_lists(dev));
147 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
148 struct drm_file *file)
150 struct drm_i915_private *dev_priv = to_i915(dev);
151 struct i915_ggtt *ggtt = &dev_priv->ggtt;
152 struct drm_i915_gem_get_aperture *args = data;
153 struct i915_vma *vma;
157 mutex_lock(&dev->struct_mutex);
158 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
160 pinned += vma->node.size;
161 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
163 pinned += vma->node.size;
164 mutex_unlock(&dev->struct_mutex);
166 args->aper_size = ggtt->base.total;
167 args->aper_available_size = args->aper_size - pinned;
173 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
175 struct address_space *mapping = obj->base.filp->f_mapping;
176 char *vaddr = obj->phys_handle->vaddr;
178 struct scatterlist *sg;
181 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
184 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
188 page = shmem_read_mapping_page(mapping, i);
190 return PTR_ERR(page);
192 src = kmap_atomic(page);
193 memcpy(vaddr, src, PAGE_SIZE);
194 drm_clflush_virt_range(vaddr, PAGE_SIZE);
201 i915_gem_chipset_flush(to_i915(obj->base.dev));
203 st = kmalloc(sizeof(*st), GFP_KERNEL);
207 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
214 sg->length = obj->base.size;
216 sg_dma_address(sg) = obj->phys_handle->busaddr;
217 sg_dma_len(sg) = obj->base.size;
224 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
228 BUG_ON(obj->madv == __I915_MADV_PURGED);
230 ret = i915_gem_object_set_to_cpu_domain(obj, true);
232 /* In the event of a disaster, abandon all caches and
235 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
238 if (obj->madv == I915_MADV_DONTNEED)
242 struct address_space *mapping = obj->base.filp->f_mapping;
243 char *vaddr = obj->phys_handle->vaddr;
246 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
250 page = shmem_read_mapping_page(mapping, i);
254 dst = kmap_atomic(page);
255 drm_clflush_virt_range(vaddr, PAGE_SIZE);
256 memcpy(dst, vaddr, PAGE_SIZE);
259 set_page_dirty(page);
260 if (obj->madv == I915_MADV_WILLNEED)
261 mark_page_accessed(page);
268 sg_free_table(obj->pages);
273 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
275 drm_pci_free(obj->base.dev, obj->phys_handle);
278 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
279 .get_pages = i915_gem_object_get_pages_phys,
280 .put_pages = i915_gem_object_put_pages_phys,
281 .release = i915_gem_object_release_phys,
285 drop_pages(struct drm_i915_gem_object *obj)
287 struct i915_vma *vma, *next;
290 drm_gem_object_reference(&obj->base);
291 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
292 if (i915_vma_unbind(vma))
295 ret = i915_gem_object_put_pages(obj);
296 drm_gem_object_unreference(&obj->base);
302 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
305 drm_dma_handle_t *phys;
308 if (obj->phys_handle) {
309 if ((unsigned long)obj->phys_handle->vaddr & (align -1))
315 if (obj->madv != I915_MADV_WILLNEED)
318 if (obj->base.filp == NULL)
321 ret = drop_pages(obj);
325 /* create a new object */
326 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
330 obj->phys_handle = phys;
331 obj->ops = &i915_gem_phys_ops;
333 return i915_gem_object_get_pages(obj);
337 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
338 struct drm_i915_gem_pwrite *args,
339 struct drm_file *file_priv)
341 struct drm_device *dev = obj->base.dev;
342 void *vaddr = obj->phys_handle->vaddr + args->offset;
343 char __user *user_data = u64_to_user_ptr(args->data_ptr);
346 /* We manually control the domain here and pretend that it
347 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
349 ret = i915_gem_object_wait_rendering(obj, false);
353 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
354 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
355 unsigned long unwritten;
357 /* The physical object once assigned is fixed for the lifetime
358 * of the obj, so we can safely drop the lock and continue
361 mutex_unlock(&dev->struct_mutex);
362 unwritten = copy_from_user(vaddr, user_data, args->size);
363 mutex_lock(&dev->struct_mutex);
370 drm_clflush_virt_range(vaddr, args->size);
371 i915_gem_chipset_flush(to_i915(dev));
374 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
378 void *i915_gem_object_alloc(struct drm_device *dev)
380 struct drm_i915_private *dev_priv = to_i915(dev);
381 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
384 void i915_gem_object_free(struct drm_i915_gem_object *obj)
386 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
387 kmem_cache_free(dev_priv->objects, obj);
391 i915_gem_create(struct drm_file *file,
392 struct drm_device *dev,
396 struct drm_i915_gem_object *obj;
400 size = roundup(size, PAGE_SIZE);
404 /* Allocate the new object */
405 obj = i915_gem_object_create(dev, size);
409 ret = drm_gem_handle_create(file, &obj->base, &handle);
410 /* drop reference from allocate - handle holds it now */
411 drm_gem_object_unreference_unlocked(&obj->base);
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
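/*
 * Worked example of the pitch/size computation above: a 1366x768 request at
 * 32bpp gives a byte width of 1366 * 4 = 5464, which rounds up to the next
 * 64-byte multiple, 5504, so the buffer size becomes 5504 * 768 bytes.
 */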
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
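/*
 * The swizzled copy helpers below exist for objects allocated on machines
 * with bit-17 swizzling: on pages whose physical address has bit 17 set,
 * the two 64-byte halves of every 128-byte block are swapped relative to
 * the CPU's view. gpu_offset ^ 64 undoes that swap (offset 0 maps to 64,
 * 192 maps to 128), and limiting each chunk to ALIGN(gpu_offset + 1, 64)
 * keeps every copy inside a single 64-byte half so the XOR stays correct
 * for the whole chunk.
 */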
448 __copy_to_user_swizzled(char __user *cpu_vaddr,
449 const char *gpu_vaddr, int gpu_offset,
452 int ret, cpu_offset = 0;
455 int cacheline_end = ALIGN(gpu_offset + 1, 64);
456 int this_length = min(cacheline_end - gpu_offset, length);
457 int swizzled_gpu_offset = gpu_offset ^ 64;
459 ret = __copy_to_user(cpu_vaddr + cpu_offset,
460 gpu_vaddr + swizzled_gpu_offset,
465 cpu_offset += this_length;
466 gpu_offset += this_length;
467 length -= this_length;
474 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
475 const char __user *cpu_vaddr,
478 int ret, cpu_offset = 0;
481 int cacheline_end = ALIGN(gpu_offset + 1, 64);
482 int this_length = min(cacheline_end - gpu_offset, length);
483 int swizzled_gpu_offset = gpu_offset ^ 64;
485 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
486 cpu_vaddr + cpu_offset,
491 cpu_offset += this_length;
492 gpu_offset += this_length;
493 length -= this_length;
500 * Pins the specified object's pages and synchronizes the object with
501 * GPU accesses. Sets needs_clflush to non-zero if the caller should
502 * flush the object from the CPU cache.
504 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
511 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
514 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
515 /* If we're not in the cpu read domain, set ourself into the gtt
516 * read domain and manually flush cachelines (if required). This
517 * optimizes for the case when the gpu will dirty the data
518 * anyway again before the next pread happens. */
519 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
521 ret = i915_gem_object_wait_rendering(obj, true);
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}
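/*
 * Typical usage: i915_gem_shmem_pread() below calls this before walking
 * obj->pages, clflushing each page first when needs_clflush was set, and
 * drops the pin with i915_gem_object_unpin_pages() once the copy is done.
 */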
535 /* Per-page copy function for the shmem pread fastpath.
536 * Flushes invalid cachelines before reading the target if
537 * needs_clflush is set. */
539 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
540 char __user *user_data,
541 bool page_do_bit17_swizzling, bool needs_clflush)
546 if (unlikely(page_do_bit17_swizzling))
549 vaddr = kmap_atomic(page);
551 drm_clflush_virt_range(vaddr + shmem_page_offset,
553 ret = __copy_to_user_inatomic(user_data,
554 vaddr + shmem_page_offset,
556 kunmap_atomic(vaddr);
558 return ret ? -EFAULT : 0;
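/*
 * For swizzled pages the flush below is widened to 128-byte granularity so
 * both 64-byte channels of each swizzled pair get flushed; for example a
 * range covering 0x70..0x90 is rounded out to 0x00..0x100 before
 * drm_clflush_virt_range() is called.
 */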
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
583 /* Only difference to the fast-path function is that this can handle bit17
584 * and uses non-atomic copy and kmap functions. */
586 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
587 char __user *user_data,
588 bool page_do_bit17_swizzling, bool needs_clflush)
595 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
597 page_do_bit17_swizzling);
599 if (page_do_bit17_swizzling)
600 ret = __copy_to_user_swizzled(user_data,
601 vaddr, shmem_page_offset,
604 ret = __copy_to_user(user_data,
605 vaddr + shmem_page_offset,
	return ret ? -EFAULT : 0;
612 static inline unsigned long
613 slow_user_access(struct io_mapping *mapping,
614 uint64_t page_base, int page_offset,
615 char __user *user_data,
616 unsigned long length, bool pwrite)
618 void __iomem *ioaddr;
622 ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
623 /* We can use the cpu mem copy function because this is X86. */
624 vaddr = (void __force *)ioaddr + page_offset;
626 unwritten = __copy_from_user(vaddr, user_data, length);
628 unwritten = __copy_to_user(user_data, vaddr, length);
630 io_mapping_unmap(ioaddr);
635 i915_gem_gtt_pread(struct drm_device *dev,
636 struct drm_i915_gem_object *obj, uint64_t size,
637 uint64_t data_offset, uint64_t data_ptr)
639 struct drm_i915_private *dev_priv = to_i915(dev);
640 struct i915_ggtt *ggtt = &dev_priv->ggtt;
641 struct drm_mm_node node;
642 char __user *user_data;
647 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
649 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
653 ret = i915_gem_object_get_pages(obj);
655 remove_mappable_node(&node);
659 i915_gem_object_pin_pages(obj);
661 node.start = i915_gem_obj_ggtt_offset(obj);
662 node.allocated = false;
663 ret = i915_gem_object_put_fence(obj);
668 ret = i915_gem_object_set_to_gtt_domain(obj, false);
672 user_data = u64_to_user_ptr(data_ptr);
674 offset = data_offset;
676 mutex_unlock(&dev->struct_mutex);
677 if (likely(!i915.prefault_disable)) {
678 ret = fault_in_multipages_writeable(user_data, remain);
680 mutex_lock(&dev->struct_mutex);
686 /* Operation in this page
688 * page_base = page offset within aperture
689 * page_offset = offset within page
690 * page_length = bytes to copy for this page
692 u32 page_base = node.start;
693 unsigned page_offset = offset_in_page(offset);
694 unsigned page_length = PAGE_SIZE - page_offset;
695 page_length = remain < page_length ? remain : page_length;
696 if (node.allocated) {
698 ggtt->base.insert_page(&ggtt->base,
699 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
704 page_base += offset & PAGE_MASK;
		/* This is a slow read/write as it tries to read from
		 * and write to user memory, which may result in page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
710 if (slow_user_access(ggtt->mappable, page_base,
711 page_offset, user_data,
712 page_length, false)) {
717 remain -= page_length;
718 user_data += page_length;
719 offset += page_length;
722 mutex_lock(&dev->struct_mutex);
723 if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
724 /* The user has modified the object whilst we tried
725 * reading from it, and we now have no idea what domain
726 * the pages should be in. As we have just been touching
727 * them directly, flush everything back to the GTT
730 ret = i915_gem_object_set_to_gtt_domain(obj, false);
734 if (node.allocated) {
736 ggtt->base.clear_range(&ggtt->base,
737 node.start, node.size,
739 i915_gem_object_unpin_pages(obj);
740 remove_mappable_node(&node);
742 i915_gem_object_ggtt_unpin(obj);
749 i915_gem_shmem_pread(struct drm_device *dev,
750 struct drm_i915_gem_object *obj,
751 struct drm_i915_gem_pread *args,
752 struct drm_file *file)
754 char __user *user_data;
757 int shmem_page_offset, page_length, ret = 0;
758 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
760 int needs_clflush = 0;
761 struct sg_page_iter sg_iter;
763 if (!i915_gem_object_has_struct_page(obj))
766 user_data = u64_to_user_ptr(args->data_ptr);
769 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
771 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
775 offset = args->offset;
777 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
778 offset >> PAGE_SHIFT) {
779 struct page *page = sg_page_iter_page(&sg_iter);
784 /* Operation in this page
786 * shmem_page_offset = offset within page in shmem file
787 * page_length = bytes to copy for this page
789 shmem_page_offset = offset_in_page(offset);
790 page_length = remain;
791 if ((shmem_page_offset + page_length) > PAGE_SIZE)
792 page_length = PAGE_SIZE - shmem_page_offset;
794 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
795 (page_to_phys(page) & (1 << 17)) != 0;
797 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
798 user_data, page_do_bit17_swizzling,
803 mutex_unlock(&dev->struct_mutex);
805 if (likely(!i915.prefault_disable) && !prefaulted) {
806 ret = fault_in_multipages_writeable(user_data, remain);
807 /* Userspace is tricking us, but we've already clobbered
808 * its pages with the prefault and promised to write the
809 * data up to the first fault. Hence ignore any errors
810 * and just continue. */
815 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
816 user_data, page_do_bit17_swizzling,
819 mutex_lock(&dev->struct_mutex);
825 remain -= page_length;
826 user_data += page_length;
827 offset += page_length;
831 i915_gem_object_unpin_pages(obj);
837 * Reads data from the object referenced by handle.
838 * @dev: drm device pointer
839 * @data: ioctl data blob
840 * @file: drm file pointer
842 * On error, the contents of *data are undefined.
845 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
846 struct drm_file *file)
848 struct drm_i915_gem_pread *args = data;
849 struct drm_i915_gem_object *obj;
855 if (!access_ok(VERIFY_WRITE,
856 u64_to_user_ptr(args->data_ptr),
860 ret = i915_mutex_lock_interruptible(dev);
864 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
865 if (&obj->base == NULL) {
870 /* Bounds check source. */
871 if (args->offset > obj->base.size ||
872 args->size > obj->base.size - args->offset) {
877 trace_i915_gem_object_pread(obj, args->offset, args->size);
879 ret = i915_gem_shmem_pread(dev, obj, args, file);
	/* pread for non-shmem-backed objects */
882 if (ret == -EFAULT || ret == -ENODEV)
883 ret = i915_gem_gtt_pread(dev, obj, args->size,
884 args->offset, args->data_ptr);
887 drm_gem_object_unreference(&obj->base);
889 mutex_unlock(&dev->struct_mutex);
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
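/*
 * fast_user_write() copies while holding an atomic mapping of the aperture
 * page, so the copy must not fault: i915_gem_pwrite_ioctl() prefaults the
 * user buffer first, and if the copy still fails the GTT pwrite path drops
 * struct_mutex and retries through slow_user_access() above.
 */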
898 fast_user_write(struct io_mapping *mapping,
899 loff_t page_base, int page_offset,
900 char __user *user_data,
903 void __iomem *vaddr_atomic;
905 unsigned long unwritten;
907 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
908 /* We can use the cpu mem copy function because this is X86. */
909 vaddr = (void __force*)vaddr_atomic + page_offset;
910 unwritten = __copy_from_user_inatomic_nocache(vaddr,
912 io_mapping_unmap_atomic(vaddr_atomic);
917 * This is the fast pwrite path, where we copy the data directly from the
918 * user into the GTT, uncached.
919 * @dev: drm device pointer
920 * @obj: i915 gem object
921 * @args: pwrite arguments structure
922 * @file: drm file pointer
925 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
926 struct drm_i915_gem_object *obj,
927 struct drm_i915_gem_pwrite *args,
928 struct drm_file *file)
930 struct i915_ggtt *ggtt = &i915->ggtt;
931 struct drm_device *dev = obj->base.dev;
932 struct drm_mm_node node;
933 uint64_t remain, offset;
934 char __user *user_data;
936 bool hit_slow_path = false;
938 if (obj->tiling_mode != I915_TILING_NONE)
941 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
943 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
947 ret = i915_gem_object_get_pages(obj);
949 remove_mappable_node(&node);
953 i915_gem_object_pin_pages(obj);
955 node.start = i915_gem_obj_ggtt_offset(obj);
956 node.allocated = false;
957 ret = i915_gem_object_put_fence(obj);
962 ret = i915_gem_object_set_to_gtt_domain(obj, true);
966 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
969 user_data = u64_to_user_ptr(args->data_ptr);
970 offset = args->offset;
973 /* Operation in this page
975 * page_base = page offset within aperture
976 * page_offset = offset within page
977 * page_length = bytes to copy for this page
979 u32 page_base = node.start;
980 unsigned page_offset = offset_in_page(offset);
981 unsigned page_length = PAGE_SIZE - page_offset;
982 page_length = remain < page_length ? remain : page_length;
983 if (node.allocated) {
984 wmb(); /* flush the write before we modify the GGTT */
985 ggtt->base.insert_page(&ggtt->base,
986 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
987 node.start, I915_CACHE_NONE, 0);
988 wmb(); /* flush modifications to the GGTT (insert_page) */
990 page_base += offset & PAGE_MASK;
992 /* If we get a fault while copying data, then (presumably) our
993 * source page isn't available. Return the error and we'll
994 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page faults.
		 */
998 if (fast_user_write(ggtt->mappable, page_base,
999 page_offset, user_data, page_length)) {
1000 hit_slow_path = true;
1001 mutex_unlock(&dev->struct_mutex);
1002 if (slow_user_access(ggtt->mappable,
1004 page_offset, user_data,
1005 page_length, true)) {
1007 mutex_lock(&dev->struct_mutex);
1011 mutex_lock(&dev->struct_mutex);
1014 remain -= page_length;
1015 user_data += page_length;
1016 offset += page_length;
1020 if (hit_slow_path) {
1022 (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1023 /* The user has modified the object whilst we tried
1024 * reading from it, and we now have no idea what domain
1025 * the pages should be in. As we have just been touching
1026 * them directly, flush everything back to the GTT
1029 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1033 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
1035 if (node.allocated) {
1037 ggtt->base.clear_range(&ggtt->base,
1038 node.start, node.size,
1040 i915_gem_object_unpin_pages(obj);
1041 remove_mappable_node(&node);
1043 i915_gem_object_ggtt_unpin(obj);
1049 /* Per-page copy function for the shmem pwrite fastpath.
1050 * Flushes invalid cachelines before writing to the target if
1051 * needs_clflush_before is set and flushes out any written cachelines after
1052 * writing if needs_clflush is set. */
1054 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1055 char __user *user_data,
1056 bool page_do_bit17_swizzling,
1057 bool needs_clflush_before,
1058 bool needs_clflush_after)
1063 if (unlikely(page_do_bit17_swizzling))
1066 vaddr = kmap_atomic(page);
1067 if (needs_clflush_before)
1068 drm_clflush_virt_range(vaddr + shmem_page_offset,
1070 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1071 user_data, page_length);
1072 if (needs_clflush_after)
1073 drm_clflush_virt_range(vaddr + shmem_page_offset,
1075 kunmap_atomic(vaddr);
1077 return ret ? -EFAULT : 0;
1080 /* Only difference to the fast-path function is that this can handle bit17
1081 * and uses non-atomic copy and kmap functions. */
1083 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1084 char __user *user_data,
1085 bool page_do_bit17_swizzling,
1086 bool needs_clflush_before,
1087 bool needs_clflush_after)
1093 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1094 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1096 page_do_bit17_swizzling);
1097 if (page_do_bit17_swizzling)
1098 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1102 ret = __copy_from_user(vaddr + shmem_page_offset,
1105 if (needs_clflush_after)
1106 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1108 page_do_bit17_swizzling);
1111 return ret ? -EFAULT : 0;
1115 i915_gem_shmem_pwrite(struct drm_device *dev,
1116 struct drm_i915_gem_object *obj,
1117 struct drm_i915_gem_pwrite *args,
1118 struct drm_file *file)
1122 char __user *user_data;
1123 int shmem_page_offset, page_length, ret = 0;
1124 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1125 int hit_slowpath = 0;
1126 int needs_clflush_after = 0;
1127 int needs_clflush_before = 0;
1128 struct sg_page_iter sg_iter;
1130 user_data = u64_to_user_ptr(args->data_ptr);
1131 remain = args->size;
1133 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1135 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1136 /* If we're not in the cpu write domain, set ourself into the gtt
1137 * write domain and manually flush cachelines (if required). This
1138 * optimizes for the case when the gpu will use the data
1139 * right away and we therefore have to clflush anyway. */
1140 needs_clflush_after = cpu_write_needs_clflush(obj);
1141 ret = i915_gem_object_wait_rendering(obj, false);
1145 /* Same trick applies to invalidate partially written cachelines read
1146 * before writing. */
1147 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1148 needs_clflush_before =
1149 !cpu_cache_is_coherent(dev, obj->cache_level);
1151 ret = i915_gem_object_get_pages(obj);
1155 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1157 i915_gem_object_pin_pages(obj);
1159 offset = args->offset;
1162 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1163 offset >> PAGE_SHIFT) {
1164 struct page *page = sg_page_iter_page(&sg_iter);
1165 int partial_cacheline_write;
1170 /* Operation in this page
1172 * shmem_page_offset = offset within page in shmem file
1173 * page_length = bytes to copy for this page
1175 shmem_page_offset = offset_in_page(offset);
1177 page_length = remain;
1178 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1179 page_length = PAGE_SIZE - shmem_page_offset;
1181 /* If we don't overwrite a cacheline completely we need to be
1182 * careful to have up-to-date data by first clflushing. Don't
1183 * overcomplicate things and flush the entire patch. */
1184 partial_cacheline_write = needs_clflush_before &&
1185 ((shmem_page_offset | page_length)
1186 & (boot_cpu_data.x86_clflush_size - 1));
1188 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1189 (page_to_phys(page) & (1 << 17)) != 0;
1191 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1192 user_data, page_do_bit17_swizzling,
1193 partial_cacheline_write,
1194 needs_clflush_after);
1199 mutex_unlock(&dev->struct_mutex);
1200 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1201 user_data, page_do_bit17_swizzling,
1202 partial_cacheline_write,
1203 needs_clflush_after);
1205 mutex_lock(&dev->struct_mutex);
1211 remain -= page_length;
1212 user_data += page_length;
1213 offset += page_length;
1217 i915_gem_object_unpin_pages(obj);
1221 * Fixup: Flush cpu caches in case we didn't flush the dirty
1222 * cachelines in-line while writing and the object moved
1223 * out of the cpu write domain while we've dropped the lock.
1225 if (!needs_clflush_after &&
1226 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1227 if (i915_gem_clflush_object(obj, obj->pin_display))
1228 needs_clflush_after = true;
1232 if (needs_clflush_after)
1233 i915_gem_chipset_flush(to_i915(dev));
1235 obj->cache_dirty = true;
1237 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1242 * Writes data to the object referenced by handle.
1244 * @data: ioctl data blob
1247 * On error, the contents of the buffer that were to be modified are undefined.
1250 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1251 struct drm_file *file)
1253 struct drm_i915_private *dev_priv = to_i915(dev);
1254 struct drm_i915_gem_pwrite *args = data;
1255 struct drm_i915_gem_object *obj;
1258 if (args->size == 0)
1261 if (!access_ok(VERIFY_READ,
1262 u64_to_user_ptr(args->data_ptr),
1266 if (likely(!i915.prefault_disable)) {
1267 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1273 intel_runtime_pm_get(dev_priv);
1275 ret = i915_mutex_lock_interruptible(dev);
1279 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1280 if (&obj->base == NULL) {
1285 /* Bounds check destination. */
1286 if (args->offset > obj->base.size ||
1287 args->size > obj->base.size - args->offset) {
1292 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1295 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1296 * it would end up going through the fenced access, and we'll get
1297 * different detiling behavior between reading and writing.
1298 * pread/pwrite currently are reading and writing from the CPU
1299 * perspective, requiring manual detiling by the client.
1301 if (!i915_gem_object_has_struct_page(obj) ||
1302 cpu_write_needs_clflush(obj)) {
1303 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1304 /* Note that the gtt paths might fail with non-page-backed user
1305 * pointers (e.g. gtt mappings when moving data between
1306 * textures). Fallback to the shmem path in that case. */
1309 if (ret == -EFAULT || ret == -ENOSPC) {
1310 if (obj->phys_handle)
1311 ret = i915_gem_phys_pwrite(obj, args, file);
1312 else if (i915_gem_object_has_struct_page(obj))
1313 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1319 drm_gem_object_unreference(&obj->base);
1321 mutex_unlock(&dev->struct_mutex);
1323 intel_runtime_pm_put(dev_priv);
1329 i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
1331 if (__i915_terminally_wedged(reset_counter))
1334 if (__i915_reset_in_progress(reset_counter)) {
1335 /* Non-interruptible callers can't handle -EAGAIN, hence return
1336 * -EIO unconditionally for these. */
1346 static unsigned long local_clock_us(unsigned *cpu)
1350 /* Cheaply and approximately convert from nanoseconds to microseconds.
1351 * The result and subsequent calculations are also defined in the same
1352 * approximate microseconds units. The principal source of timing
1353 * error here is from the simple truncation.
1355 * Note that local_clock() is only defined wrt to the current CPU;
1356 * the comparisons are no longer valid if we switch CPUs. Instead of
1357 * blocking preemption for the entire busywait, we can detect the CPU
1358 * switch and use that as indicator of system load and a reason to
1359 * stop busywaiting, see busywait_stop().
1362 t = local_clock() >> 10;
1368 static bool busywait_stop(unsigned long timeout, unsigned cpu)
1372 if (time_after(local_clock_us(&this_cpu), timeout))
1375 return this_cpu != cpu;
1378 bool __i915_spin_request(const struct drm_i915_gem_request *req,
1379 int state, unsigned long timeout_us)
1383 /* When waiting for high frequency requests, e.g. during synchronous
1384 * rendering split between the CPU and GPU, the finite amount of time
1385 * required to set up the irq and wait upon it limits the response
1386 * rate. By busywaiting on the request completion for a short while we
1387 * can service the high frequency waits as quick as possible. However,
1388 * if it is a slow request, we want to sleep as quickly as possible.
1389 * The tradeoff between waiting and sleeping is roughly the time it
1390 * takes to sleep on a request, on the order of a microsecond.
1393 timeout_us += local_clock_us(&cpu);
1395 if (i915_gem_request_completed(req))
1398 if (signal_pending_state(state, current))
1401 if (busywait_stop(timeout_us, cpu))
		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}
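/*
 * __i915_wait_request() below applies this spin in two places: an
 * optimistic ~5us busywait before enabling the interrupt, and a shorter
 * ~2us spin after each wakeup to give the seqno time to become visible
 * following the interrupt.
 */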
1411 * __i915_wait_request - wait until execution of request has finished
1413 * @interruptible: do an interruptible wait (normally yes)
1414 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1417 * Note: It is of utmost importance that the passed in seqno and reset_counter
1418 * values have been read by the caller in an smp safe manner. Where read-side
1419 * locks are involved, it is sufficient to read the reset_counter before
1420 * unlocking the lock that protects the seqno. For lockless tricks, the
1421 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1424 * Returns 0 if the request was found within the alloted time. Else returns the
1425 * errno with remaining time filled in timeout argument.
1427 int __i915_wait_request(struct drm_i915_gem_request *req,
1430 struct intel_rps_client *rps)
1432 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1434 struct intel_wait wait;
1435 unsigned long timeout_remain;
1436 s64 before = 0; /* Only to silence a compiler warning. */
1441 if (list_empty(&req->list))
1444 if (i915_gem_request_completed(req))
1447 timeout_remain = MAX_SCHEDULE_TIMEOUT;
1449 if (WARN_ON(*timeout < 0))
1455 timeout_remain = nsecs_to_jiffies_timeout(*timeout);
1458 * Record current time in case interrupted by signal, or wedged.
1460 before = ktime_get_raw_ns();
1463 trace_i915_gem_request_wait_begin(req);
1465 /* This client is about to stall waiting for the GPU. In many cases
1466 * this is undesirable and limits the throughput of the system, as
1467 * many clients cannot continue processing user input/output whilst
1468 * blocked. RPS autotuning may take tens of milliseconds to respond
1469 * to the GPU load and thus incurs additional latency for the client.
1470 * We can circumvent that by promoting the GPU frequency to maximum
1471 * before we wait. This makes the GPU throttle up much more quickly
1472 * (good for benchmarks and user experience, e.g. window animations),
1473 * but at a cost of spending more power processing the workload
1474 * (bad for battery). Not all clients even want their results
1475 * immediately and for them we should just let the GPU select its own
1476 * frequency to maximise efficiency. To prevent a single client from
1477 * forcing the clocks too high for the whole system, we only allow
1478 * each client to waitboost once in a busy period.
1480 if (INTEL_INFO(req->i915)->gen >= 6)
1481 gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
1483 /* Optimistic spin for the next ~jiffie before touching IRQs */
1484 if (i915_spin_request(req, state, 5))
1487 set_current_state(state);
1488 add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
1490 intel_wait_init(&wait, req->seqno);
1491 if (intel_engine_add_wait(req->engine, &wait))
1492 /* In order to check that we haven't missed the interrupt
1493 * as we enabled it, we need to kick ourselves to do a
1494 * coherent check on the seqno before we sleep.
1499 if (signal_pending_state(state, current)) {
1504 timeout_remain = io_schedule_timeout(timeout_remain);
1505 if (timeout_remain == 0) {
1510 if (intel_wait_complete(&wait))
1513 set_current_state(state);
1516 /* Carefully check if the request is complete, giving time
1517 * for the seqno to be visible following the interrupt.
1518 * We also have to check in case we are kicked by the GPU
1519 * reset in order to drop the struct_mutex.
1521 if (__i915_request_irq_complete(req))
1524 /* Only spin if we know the GPU is processing this request */
1525 if (i915_spin_request(req, state, 2))
1528 remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
1530 intel_engine_remove_wait(req->engine, &wait);
1531 __set_current_state(TASK_RUNNING);
1533 trace_i915_gem_request_wait_end(req);
1536 s64 tres = *timeout - (ktime_get_raw_ns() - before);
1538 *timeout = tres < 0 ? 0 : tres;
1541 * Apparently ktime isn't accurate enough and occasionally has a
1542 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
1543 * things up to make the test happy. We allow up to 1 jiffy.
		 * This is a regression from the timespec->ktime conversion.
1547 if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
1551 if (rps && req->seqno == req->engine->last_submitted_seqno) {
1552 /* The GPU is now idle and this client has stalled.
1553 * Since no other client has submitted a request in the
1554 * meantime, assume that this client is the only one
1555 * supplying work to the GPU but is unable to keep that
1556 * work supplied because it is waiting. Since the GPU is
1557 * then never kept fully busy, RPS autoclocking will
1558 * keep the clocks relatively low, causing further delays.
1559 * Compensate by giving the synchronous client credit for
1560 * a waitboost next time.
1562 spin_lock(&req->i915->rps.client_lock);
1563 list_del_init(&rps->link);
1564 spin_unlock(&req->i915->rps.client_lock);
1570 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
1571 struct drm_file *file)
1573 struct drm_i915_file_private *file_priv;
1575 WARN_ON(!req || !file || req->file_priv);
1583 file_priv = file->driver_priv;
1585 spin_lock(&file_priv->mm.lock);
1586 req->file_priv = file_priv;
1587 list_add_tail(&req->client_list, &file_priv->mm.request_list);
1588 spin_unlock(&file_priv->mm.lock);
1590 req->pid = get_pid(task_pid(current));
1596 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1598 struct drm_i915_file_private *file_priv = request->file_priv;
1603 spin_lock(&file_priv->mm.lock);
1604 list_del(&request->client_list);
1605 request->file_priv = NULL;
1606 spin_unlock(&file_priv->mm.lock);
1608 put_pid(request->pid);
1609 request->pid = NULL;
1612 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1614 trace_i915_gem_request_retire(request);
1616 /* We know the GPU must have read the request to have
1617 * sent us the seqno + interrupt, so use the position
1618 * of tail of the request to update the last known position
1621 * Note this requires that we are always called in request
1624 request->ringbuf->last_retired_head = request->postfix;
1626 list_del_init(&request->list);
1627 i915_gem_request_remove_from_client(request);
1629 if (request->previous_context) {
1630 if (i915.enable_execlists)
1631 intel_lr_context_unpin(request->previous_context,
1635 i915_gem_context_unreference(request->ctx);
1636 i915_gem_request_unreference(request);
1640 __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1642 struct intel_engine_cs *engine = req->engine;
1643 struct drm_i915_gem_request *tmp;
1645 lockdep_assert_held(&engine->i915->drm.struct_mutex);
1647 if (list_empty(&req->list))
1651 tmp = list_first_entry(&engine->request_list,
1652 typeof(*tmp), list);
1654 i915_gem_request_retire(tmp);
1655 } while (tmp != req);
1657 WARN_ON(i915_verify_lists(engine->dev));
1661 * Waits for a request to be signaled, and cleans up the
1662 * request and object lists appropriately for that event.
1663 * @req: request to wait on
1666 i915_wait_request(struct drm_i915_gem_request *req)
1668 struct drm_i915_private *dev_priv = req->i915;
1672 interruptible = dev_priv->mm.interruptible;
1674 BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
1676 ret = __i915_wait_request(req, interruptible, NULL, NULL);
1680 /* If the GPU hung, we want to keep the requests to find the guilty. */
1681 if (!i915_reset_in_progress(&dev_priv->gpu_error))
1682 __i915_gem_request_retire__upto(req);
1688 * Ensures that all rendering to the object has completed and the object is
1689 * safe to unbind from the GTT or access from the CPU.
1690 * @obj: i915 gem object
1691 * @readonly: waiting for read access or write
1694 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1703 if (obj->last_write_req != NULL) {
1704 ret = i915_wait_request(obj->last_write_req);
1708 i = obj->last_write_req->engine->id;
1709 if (obj->last_read_req[i] == obj->last_write_req)
1710 i915_gem_object_retire__read(obj, i);
1712 i915_gem_object_retire__write(obj);
1715 for (i = 0; i < I915_NUM_ENGINES; i++) {
1716 if (obj->last_read_req[i] == NULL)
1719 ret = i915_wait_request(obj->last_read_req[i]);
1723 i915_gem_object_retire__read(obj, i);
1725 GEM_BUG_ON(obj->active);
1732 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1733 struct drm_i915_gem_request *req)
1735 int ring = req->engine->id;
1737 if (obj->last_read_req[ring] == req)
1738 i915_gem_object_retire__read(obj, ring);
1739 else if (obj->last_write_req == req)
1740 i915_gem_object_retire__write(obj);
1742 if (!i915_reset_in_progress(&req->i915->gpu_error))
1743 __i915_gem_request_retire__upto(req);
1746 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1747 * as the object state may change during this call.
1749 static __must_check int
1750 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1751 struct intel_rps_client *rps,
1754 struct drm_device *dev = obj->base.dev;
1755 struct drm_i915_private *dev_priv = to_i915(dev);
1756 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1759 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1760 BUG_ON(!dev_priv->mm.interruptible);
1766 struct drm_i915_gem_request *req;
1768 req = obj->last_write_req;
1772 requests[n++] = i915_gem_request_reference(req);
1774 for (i = 0; i < I915_NUM_ENGINES; i++) {
1775 struct drm_i915_gem_request *req;
1777 req = obj->last_read_req[i];
1781 requests[n++] = i915_gem_request_reference(req);
1785 mutex_unlock(&dev->struct_mutex);
1787 for (i = 0; ret == 0 && i < n; i++)
1788 ret = __i915_wait_request(requests[i], true, NULL, rps);
1789 mutex_lock(&dev->struct_mutex);
1791 for (i = 0; i < n; i++) {
1793 i915_gem_object_retire_request(obj, requests[i]);
1794 i915_gem_request_unreference(requests[i]);
1800 static struct intel_rps_client *to_rps_client(struct drm_file *file)
1802 struct drm_i915_file_private *fpriv = file->driver_priv;
1806 static enum fb_op_origin
1807 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1809 return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1810 ORIGIN_GTT : ORIGIN_CPU;
1814 * Called when user space prepares to use an object with the CPU, either
1815 * through the mmap ioctl's mapping or a GTT mapping.
1817 * @data: ioctl data blob
1821 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1822 struct drm_file *file)
1824 struct drm_i915_gem_set_domain *args = data;
1825 struct drm_i915_gem_object *obj;
1826 uint32_t read_domains = args->read_domains;
1827 uint32_t write_domain = args->write_domain;
1830 /* Only handle setting domains to types used by the CPU. */
1831 if (write_domain & I915_GEM_GPU_DOMAINS)
1834 if (read_domains & I915_GEM_GPU_DOMAINS)
1837 /* Having something in the write domain implies it's in the read
1838 * domain, and only that read domain. Enforce that in the request.
1840 if (write_domain != 0 && read_domains != write_domain)
1843 ret = i915_mutex_lock_interruptible(dev);
1847 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1848 if (&obj->base == NULL) {
1853 /* Try to flush the object off the GPU without holding the lock.
1854 * We will repeat the flush holding the lock in the normal manner
1855 * to catch cases where we are gazumped.
1857 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1858 to_rps_client(file),
1863 if (read_domains & I915_GEM_DOMAIN_GTT)
1864 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1866 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1868 if (write_domain != 0)
1869 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1872 drm_gem_object_unreference(&obj->base);
1874 mutex_unlock(&dev->struct_mutex);
1879 * Called when user space has done writes to this buffer
1881 * @data: ioctl data blob
1885 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1886 struct drm_file *file)
1888 struct drm_i915_gem_sw_finish *args = data;
1889 struct drm_i915_gem_object *obj;
1892 ret = i915_mutex_lock_interruptible(dev);
1896 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
1897 if (&obj->base == NULL) {
1902 /* Pinned buffers may be scanout, so flush the cache */
1903 if (obj->pin_display)
1904 i915_gem_object_flush_cpu_write_domain(obj);
1906 drm_gem_object_unreference(&obj->base);
1908 mutex_unlock(&dev->struct_mutex);
1913 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1916 * @data: ioctl data blob
1919 * While the mapping holds a reference on the contents of the object, it doesn't
1920 * imply a ref on the object itself.
 * DRM driver writers who look at this function as an example of how to do GEM
1925 * mmap support, please don't implement mmap support like here. The modern way
1926 * to implement DRM mmap support is with an mmap offset ioctl (like
1927 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1928 * That way debug tooling like valgrind will understand what's going on, hiding
1929 * the mmap call in a driver private ioctl will break that. The i915 driver only
1930 * does cpu mmaps this way because we didn't know better.
1933 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1934 struct drm_file *file)
1936 struct drm_i915_gem_mmap *args = data;
1937 struct drm_gem_object *obj;
1940 if (args->flags & ~(I915_MMAP_WC))
1943 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1946 obj = drm_gem_object_lookup(file, args->handle);
1950 /* prime objects have no backing filp to GEM mmap
1954 drm_gem_object_unreference_unlocked(obj);
1958 addr = vm_mmap(obj->filp, 0, args->size,
1959 PROT_READ | PROT_WRITE, MAP_SHARED,
1961 if (args->flags & I915_MMAP_WC) {
1962 struct mm_struct *mm = current->mm;
1963 struct vm_area_struct *vma;
1965 if (down_write_killable(&mm->mmap_sem)) {
1966 drm_gem_object_unreference_unlocked(obj);
1969 vma = find_vma(mm, addr);
1972 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1975 up_write(&mm->mmap_sem);
1977 /* This may race, but that's ok, it only gets set */
1978 WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
1980 drm_gem_object_unreference_unlocked(obj);
1981 if (IS_ERR((void *)addr))
1984 args->addr_ptr = (uint64_t) addr;
1990 * i915_gem_fault - fault a page into the GTT
1991 * @vma: VMA in question
1994 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1995 * from userspace. The fault handler takes care of binding the object to
1996 * the GTT (if needed), allocating and programming a fence register (again,
1997 * only if needed based on whether the old reg is still valid or the object
1998 * is tiled) and inserting a new PTE into the faulting process.
2000 * Note that the faulting process may involve evicting existing objects
2001 * from the GTT and/or fence registers to make room. So performance may
2002 * suffer if the GTT working set is large or there are few fence registers
2005 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2007 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
2008 struct drm_device *dev = obj->base.dev;
2009 struct drm_i915_private *dev_priv = to_i915(dev);
2010 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2011 struct i915_ggtt_view view = i915_ggtt_view_normal;
2012 pgoff_t page_offset;
2015 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
2017 intel_runtime_pm_get(dev_priv);
2019 /* We don't use vmf->pgoff since that has the fake offset */
2020 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
2023 ret = i915_mutex_lock_interruptible(dev);
2027 trace_i915_gem_object_fault(obj, page_offset, true, write);
2029 /* Try to flush the object off the GPU first without holding the lock.
2030 * Upon reacquiring the lock, we will perform our sanity checks and then
2031 * repeat the flush holding the lock in the normal manner to catch cases
2032 * where we are gazumped.
2034 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
2038 /* Access to snoopable pages through the GTT is incoherent. */
2039 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
2044 /* Use a partial view if the object is bigger than the aperture. */
2045 if (obj->base.size >= ggtt->mappable_end &&
2046 obj->tiling_mode == I915_TILING_NONE) {
		static const unsigned int chunk_size = 256; /* 1 MiB */
2049 memset(&view, 0, sizeof(view));
2050 view.type = I915_GGTT_VIEW_PARTIAL;
2051 view.params.partial.offset = rounddown(page_offset, chunk_size);
2052 view.params.partial.size =
2055 (vma->vm_end - vma->vm_start)/PAGE_SIZE -
2056 view.params.partial.offset);
2059 /* Now pin it into the GTT if needed */
2060 ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
2064 ret = i915_gem_object_set_to_gtt_domain(obj, write);
2068 ret = i915_gem_object_get_fence(obj);
2072 /* Finally, remap it using the new GTT offset */
2073 pfn = ggtt->mappable_base +
2074 i915_gem_obj_ggtt_offset_view(obj, &view);
2077 if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
2078 /* Overriding existing pages in partial view does not cause
2079 * us any trouble as TLBs are still valid because the fault
2080 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partial view's range).
2083 unsigned long base = vma->vm_start +
2084 (view.params.partial.offset << PAGE_SHIFT);
2087 for (i = 0; i < view.params.partial.size; i++) {
2088 ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
2093 obj->fault_mappable = true;
2095 if (!obj->fault_mappable) {
2096 unsigned long size = min_t(unsigned long,
2097 vma->vm_end - vma->vm_start,
2101 for (i = 0; i < size >> PAGE_SHIFT; i++) {
2102 ret = vm_insert_pfn(vma,
2103 (unsigned long)vma->vm_start + i * PAGE_SIZE,
2109 obj->fault_mappable = true;
2111 ret = vm_insert_pfn(vma,
2112 (unsigned long)vmf->virtual_address,
2116 i915_gem_object_ggtt_unpin_view(obj, &view);
2118 mutex_unlock(&dev->struct_mutex);
2123 * We eat errors when the gpu is terminally wedged to avoid
2124 * userspace unduly crashing (gl has no provisions for mmaps to
2125 * fail). But any other -EIO isn't ours (e.g. swap in failure)
2126 * and so needs to be reported.
2128 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2129 ret = VM_FAULT_SIGBUS;
2134 * EAGAIN means the gpu is hung and we'll wait for the error
2135 * handler to reset everything when re-faulting in
2136 * i915_mutex_lock_interruptible.
2143 * EBUSY is ok: this just means that another thread
2144 * already did the job.
2146 ret = VM_FAULT_NOPAGE;
2153 ret = VM_FAULT_SIGBUS;
2156 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2157 ret = VM_FAULT_SIGBUS;
2161 intel_runtime_pm_put(dev_priv);
2166 * i915_gem_release_mmap - remove physical page mappings
2167 * @obj: obj in question
2169 * Preserve the reservation of the mmapping with the DRM core code, but
2170 * relinquish ownership of the pages back to the system.
2172 * It is vital that we remove the page mapping if we have mapped a tiled
2173 * object through the GTT and then lose the fence register due to
2174 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
2176 * mapping will then trigger a page fault on the next user access, allowing
2177 * fixup by i915_gem_fault().
2180 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2182 /* Serialisation between user GTT access and our code depends upon
2183 * revoking the CPU's PTE whilst the mutex is held. The next user
2184 * pagefault then has to wait until we release the mutex.
2186 lockdep_assert_held(&obj->base.dev->struct_mutex);
2188 if (!obj->fault_mappable)
2191 drm_vma_node_unmap(&obj->base.vma_node,
2192 obj->base.dev->anon_inode->i_mapping);
2194 /* Ensure that the CPU's PTE are revoked and there are not outstanding
2195 * memory transactions from userspace before we return. The TLB
2196 * flushing implied above by changing the PTE above *should* be
2197 * sufficient, an extra barrier here just provides us with a bit
2198 * of paranoid documentation about our requirement to serialise
2199 * memory writes before touching registers / GSM.
2203 obj->fault_mappable = false;
2207 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
2209 struct drm_i915_gem_object *obj;
2211 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
2212 i915_gem_release_mmap(obj);
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev))
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}
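/*
 * Example: on gen3 a 700KiB tiled object needs the minimum 1MiB fence
 * region, a 1.5MiB tiled object is rounded up to the next power of two
 * (2MiB), and from gen4 onwards the fence tracks the object size exactly,
 * so the size is returned unchanged.
 */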
2237 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
2239 * @size: object size
2240 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
2243 * Return the required GTT alignment for an object, taking into account
2244 * potential fence register mapping.
2247 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2248 int tiling_mode, bool fenced)
2251 * Minimum alignment is 4k (GTT page size), but might be greater
2252 * if a fence register is needed for the object.
2254 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
2255 tiling_mode == I915_TILING_NONE)
2259 * Previous chips need to be aligned to the size of the smallest
2260 * fence register that can contain the object.
2262 return i915_gem_get_gtt_size(dev, size, tiling_mode);
2265 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2267 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2270 dev_priv->mm.shrinker_no_lock_stealing = true;
2272 ret = drm_gem_create_mmap_offset(&obj->base);
2276 /* Badly fragmented mmap space? The only way we can recover
2277 * space is by destroying unwanted objects. We can't randomly release
2278 * mmap_offsets as userspace expects them to be persistent for the
2279 * lifetime of the objects. The closest we can is to release the
2280 * offsets on purgeable objects by truncating it and marking it purged,
2281 * which prevents userspace from ever using that object again.
2283 i915_gem_shrink(dev_priv,
2284 obj->base.size >> PAGE_SHIFT,
2286 I915_SHRINK_UNBOUND |
2287 I915_SHRINK_PURGEABLE);
2288 ret = drm_gem_create_mmap_offset(&obj->base);
2292 i915_gem_shrink_all(dev_priv);
2293 ret = drm_gem_create_mmap_offset(&obj->base);
2295 dev_priv->mm.shrinker_no_lock_stealing = false;
2300 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2302 drm_gem_free_mmap_offset(&obj->base);
2306 i915_gem_mmap_gtt(struct drm_file *file,
2307 struct drm_device *dev,
2311 struct drm_i915_gem_object *obj;
2314 ret = i915_mutex_lock_interruptible(dev);
2318 obj = to_intel_bo(drm_gem_object_lookup(file, handle));
2319 if (&obj->base == NULL) {
2324 if (obj->madv != I915_MADV_WILLNEED) {
2325 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
2330 ret = i915_gem_object_create_mmap_offset(obj);
2334 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2337 drm_gem_object_unreference(&obj->base);
2339 mutex_unlock(&dev->struct_mutex);
2344 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2346 * @data: GTT mapping ioctl data
2347 * @file: GEM object info
2349 * Simply returns the fake offset to userspace so it can mmap it.
2350 * The mmap call will end up in drm_gem_mmap(), which will set things
2351 * up so we can get faults in the handler above.
2353 * The fault handler will take care of binding the object into the GTT
2354 * (since it may have been evicted to make room for something), allocating
2355 * a fence register, and mapping the appropriate aperture address into
2359 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2360 struct drm_file *file)
2362 struct drm_i915_gem_mmap_gtt *args = data;
2364 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2367 /* Immediately discard the backing storage */
2369 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2371 i915_gem_object_free_mmap_offset(obj);
2373 if (obj->base.filp == NULL)
2376 /* Our goal here is to return as much of the memory as
2377 * is possible back to the system as we are called from OOM.
2378 * To do this we must instruct the shmfs to drop all of its
2379 * backing pages, *now*.
2381 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2382 obj->madv = __I915_MADV_PURGED;
2385 /* Try to discard unwanted pages */
2387 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2389 struct address_space *mapping;
2391 switch (obj->madv) {
2392 case I915_MADV_DONTNEED:
2393 i915_gem_object_truncate(obj);
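		/* fall through: a freshly truncated object is treated as purged */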
2394 case __I915_MADV_PURGED:
2398 if (obj->base.filp == NULL)
	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
2406 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2408 struct sgt_iter sgt_iter;
2412 BUG_ON(obj->madv == __I915_MADV_PURGED);
2414 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2416 /* In the event of a disaster, abandon all caches and
2417 * hope for the best.
2419 i915_gem_clflush_object(obj, true);
2420 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2423 i915_gem_gtt_finish_object(obj);
2425 if (i915_gem_object_needs_bit17_swizzle(obj))
2426 i915_gem_object_save_bit_17_swizzle(obj);
2428 if (obj->madv == I915_MADV_DONTNEED)
2431 for_each_sgt_page(page, sgt_iter, obj->pages) {
2433 set_page_dirty(page);
2435 if (obj->madv == I915_MADV_WILLNEED)
2436 mark_page_accessed(page);
2442 sg_free_table(obj->pages);
2447 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2449 const struct drm_i915_gem_object_ops *ops = obj->ops;
2451 if (obj->pages == NULL)
2454 if (obj->pages_pin_count)
2457 BUG_ON(i915_gem_obj_bound_any(obj));
2459 /* ->put_pages might need to allocate memory for the bit17 swizzle
2460 * array, hence protect them from being reaped by removing them from gtt
2462 list_del(&obj->global_list);
2465 if (is_vmalloc_addr(obj->mapping))
2466 vunmap(obj->mapping);
2468 kunmap(kmap_to_page(obj->mapping));
2469 obj->mapping = NULL;
2472 ops->put_pages(obj);
2475 i915_gem_object_invalidate(obj);
2481 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2483 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2485 struct address_space *mapping;
2486 struct sg_table *st;
2487 struct scatterlist *sg;
2488 struct sgt_iter sgt_iter;
2490 unsigned long last_pfn = 0; /* suppress gcc warning */
2494 /* Assert that the object is not currently in any GPU domain. As it
2495 * wasn't in the GTT, there shouldn't be any way it could have been in
2498 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2499 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2501 st = kmalloc(sizeof(*st), GFP_KERNEL);
2505 page_count = obj->base.size / PAGE_SIZE;
2506 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2511 /* Get the list of pages out of our struct file. They'll be pinned
2512 * at this point until we release them.
2514 * Fail silently without starting the shrinker
2516 mapping = obj->base.filp->f_mapping;
2517 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2518 gfp |= __GFP_NORETRY | __GFP_NOWARN;
2521 for (i = 0; i < page_count; i++) {
2522 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2524 i915_gem_shrink(dev_priv,
2527 I915_SHRINK_UNBOUND |
2528 I915_SHRINK_PURGEABLE);
2529 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2532 /* We've tried hard to allocate the memory by reaping
2533 * our own buffer, now let the real VM do its job and
2534 * go down in flames if truly OOM.
2536 i915_gem_shrink_all(dev_priv);
2537 page = shmem_read_mapping_page(mapping, i);
2539 ret = PTR_ERR(page);
2543 #ifdef CONFIG_SWIOTLB
2544 if (swiotlb_nr_tbl()) {
2546 sg_set_page(sg, page, PAGE_SIZE, 0);
2551 if (!i || page_to_pfn(page) != last_pfn + 1) {
2555 sg_set_page(sg, page, PAGE_SIZE, 0);
2557 sg->length += PAGE_SIZE;
2559 last_pfn = page_to_pfn(page);
2561 /* Check that the i965g/gm workaround works. */
2562 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2564 #ifdef CONFIG_SWIOTLB
2565 if (!swiotlb_nr_tbl())
2570 ret = i915_gem_gtt_prepare_object(obj);
2574 if (i915_gem_object_needs_bit17_swizzle(obj))
2575 i915_gem_object_do_bit_17_swizzle(obj);
2577 if (obj->tiling_mode != I915_TILING_NONE &&
2578 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2579 i915_gem_object_pin_pages(obj);
2585 for_each_sgt_page(page, sgt_iter, st)
2590 /* shmemfs first checks if there is enough memory to allocate the page
2591 * and reports ENOSPC if there is insufficient memory, along with the usual
2592 * ENOMEM for a genuine allocation failure.
2594 * We use ENOSPC in our driver to mean that we have run out of aperture
2595 * space and so want to translate the error from shmemfs back to our
2596 * usual understanding of ENOMEM.
2604 /* Ensure that the associated pages are gathered from the backing storage
2605 * and pinned into our object. i915_gem_object_get_pages() may be called
2606 * multiple times before they are released by a single call to
2607 * i915_gem_object_put_pages() - once the pages are no longer referenced
2608 * either as a result of memory pressure (reaping pages under the shrinker)
2609 * or as the object is itself released.
2612 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2614 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2615 const struct drm_i915_gem_object_ops *ops = obj->ops;
2621 if (obj->madv != I915_MADV_WILLNEED) {
2622 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2626 BUG_ON(obj->pages_pin_count);
2628 ret = ops->get_pages(obj);
2632 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2634 obj->get_page.sg = obj->pages->sgl;
2635 obj->get_page.last = 0;
2640 /* The 'mapping' part of i915_gem_object_pin_map() below */
2641 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2643 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2644 struct sg_table *sgt = obj->pages;
2645 struct sgt_iter sgt_iter;
2647 struct page *stack_pages[32];
2648 struct page **pages = stack_pages;
2649 unsigned long i = 0;
2652 /* A single page can always be kmapped */
2654 return kmap(sg_page(sgt->sgl));
2656 if (n_pages > ARRAY_SIZE(stack_pages)) {
2657 /* Too big for stack -- allocate temporary array instead */
2658 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2663 for_each_sgt_page(page, sgt_iter, sgt)
2666 /* Check that we have the expected number of pages */
2667 GEM_BUG_ON(i != n_pages);
2669 addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2671 if (pages != stack_pages)
2672 drm_free_large(pages);
2677 /* get, pin, and map the pages of the object into kernel space */
2678 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2682 lockdep_assert_held(&obj->base.dev->struct_mutex);
2684 ret = i915_gem_object_get_pages(obj);
2686 return ERR_PTR(ret);
2688 i915_gem_object_pin_pages(obj);
2690 if (!obj->mapping) {
2691 obj->mapping = i915_gem_object_map(obj);
2692 if (!obj->mapping) {
2693 i915_gem_object_unpin_pages(obj);
2694 return ERR_PTR(-ENOMEM);
2698 return obj->mapping;
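/* The mapping created above is cached in obj->mapping (kmap for a single
 * page, vmap otherwise) and is only torn down when the pages themselves
 * are released in i915_gem_object_put_pages(), so repeat callers get the
 * same pointer back for as long as the pages stay pinned.
 */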
2701 void i915_vma_move_to_active(struct i915_vma *vma,
2702 struct drm_i915_gem_request *req)
2704 struct drm_i915_gem_object *obj = vma->obj;
2705 struct intel_engine_cs *engine;
2707 engine = i915_gem_request_get_engine(req);
2709 /* Add a reference if we're newly entering the active list. */
2710 if (obj->active == 0)
2711 drm_gem_object_reference(&obj->base);
2712 obj->active |= intel_engine_flag(engine);
2714 list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
2715 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
2717 list_move_tail(&vma->vm_link, &vma->vm->active_list);
2721 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2723 GEM_BUG_ON(obj->last_write_req == NULL);
2724 GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
2726 i915_gem_request_assign(&obj->last_write_req, NULL);
2727 intel_fb_obj_flush(obj, true, ORIGIN_CS);
2731 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2733 struct i915_vma *vma;
2735 GEM_BUG_ON(obj->last_read_req[ring] == NULL);
2736 GEM_BUG_ON(!(obj->active & (1 << ring)));
2738 list_del_init(&obj->engine_list[ring]);
2739 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2741 if (obj->last_write_req && obj->last_write_req->engine->id == ring)
2742 i915_gem_object_retire__write(obj);
2744 obj->active &= ~(1 << ring);
2748 /* Bump our place on the bound list to keep it roughly in LRU order
2749 * so that we don't steal from recently used but inactive objects
2750 * (unless we are forced to, of course!)
2752 list_move_tail(&obj->global_list,
2753 &to_i915(obj->base.dev)->mm.bound_list);
2755 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2756 if (!list_empty(&vma->vm_link))
2757 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
2760 i915_gem_request_assign(&obj->last_fenced_req, NULL);
2761 drm_gem_object_unreference(&obj->base);
2765 i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
2767 struct intel_engine_cs *engine;
2770 /* Carefully retire all requests without writing to the rings */
2771 for_each_engine(engine, dev_priv) {
2772 ret = intel_engine_idle(engine);
2776 i915_gem_retire_requests(dev_priv);
2778 /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
2779 if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
2780 while (intel_kick_waiters(dev_priv) ||
2781 intel_kick_signalers(dev_priv))
2785 /* Finally reset hw state */
2786 for_each_engine(engine, dev_priv)
2787 intel_ring_init_seqno(engine, seqno);
2792 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2794 struct drm_i915_private *dev_priv = to_i915(dev);
2800 /* HWS page seqno needs to be set to less than what we
2801 * will inject to the ring
2803 ret = i915_gem_init_seqno(dev_priv, seqno - 1);
2807 /* Carefully set the last_seqno value so that wrap
2808 * detection still works
2810 dev_priv->next_seqno = seqno;
2811 dev_priv->last_seqno = seqno - 1;
2812 if (dev_priv->last_seqno == 0)
2813 dev_priv->last_seqno--;
2819 i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
2821 /* reserve 0 for non-seqno */
2822 if (dev_priv->next_seqno == 0) {
2823 int ret = i915_gem_init_seqno(dev_priv, 0);
2827 dev_priv->next_seqno = 1;
2830 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
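/* Note on the seqno scheme: 0 is reserved to mean "no seqno", next_seqno
 * is the value handed to the next request, and last_seqno mirrors the most
 * recently assigned value for wrap detection. Seqno comparisons elsewhere
 * in the driver (i915_seqno_passed()) use wrap-safe signed arithmetic,
 * roughly (s32)(seq1 - seq2) >= 0.
 */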
2834 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
2836 struct drm_i915_private *dev_priv = engine->i915;
2838 dev_priv->gt.active_engines |= intel_engine_flag(engine);
2839 if (dev_priv->gt.awake)
2842 intel_runtime_pm_get_noresume(dev_priv);
2843 dev_priv->gt.awake = true;
2845 i915_update_gfx_val(dev_priv);
2846 if (INTEL_GEN(dev_priv) >= 6)
2847 gen6_rps_busy(dev_priv);
2849 queue_delayed_work(dev_priv->wq,
2850 &dev_priv->gt.retire_work,
2851 round_jiffies_up_relative(HZ));
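/* i915_gem_mark_busy() above performs the idle->busy transition for the GT:
 * the first time an engine becomes active it takes a runtime-pm wakeref,
 * kicks RPS on gen6+ and arms the retire worker that will eventually undo
 * all of this once the engines fall idle again.
 */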
2855 * NB: This function is not allowed to fail. Doing so would mean that the
2856 * request is not being tracked for completion but the work itself is
2857 * going to happen on the hardware. This would be a Bad Thing(tm).
2859 void __i915_add_request(struct drm_i915_gem_request *request,
2860 struct drm_i915_gem_object *obj,
2863 struct intel_engine_cs *engine;
2864 struct intel_ringbuffer *ringbuf;
2869 if (WARN_ON(request == NULL))
2872 engine = request->engine;
2873 ringbuf = request->ringbuf;
2876 * To ensure that this call will not fail, space for its emissions
2877 * should already have been reserved in the ring buffer. Let the ring
2878 * know that it is time to use that space up.
2880 request_start = intel_ring_get_tail(ringbuf);
2881 reserved_tail = request->reserved_space;
2882 request->reserved_space = 0;
2885 * Emit any outstanding flushes - execbuf can fail to emit the flush
2886 * after having emitted the batchbuffer command. Hence we need to fix
2887 * things up similar to emitting the lazy request. The difference here
2888 * is that the flush _must_ happen before the next request, no matter
2892 if (i915.enable_execlists)
2893 ret = logical_ring_flush_all_caches(request);
2895 ret = intel_ring_flush_all_caches(request);
2896 /* Not allowed to fail! */
2897 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
2900 trace_i915_gem_request_add(request);
2902 request->head = request_start;
2904 /* Whilst this request exists, batch_obj will be on the
2905 * active_list, and so will hold the active reference. Only when this
2906 * request is retired will the batch_obj be moved onto the
2907 * inactive_list and lose its active reference. Hence we do not need
2908 * to explicitly hold another reference here.
2910 request->batch_obj = obj;
2912 /* Seal the request and mark it as pending execution. Note that
2913 * we may inspect this state, without holding any locks, during
2914 * hangcheck. Hence we apply the barrier to ensure that we do not
2915 * see a more recent value in the hws than we are tracking.
2917 request->emitted_jiffies = jiffies;
2918 request->previous_seqno = engine->last_submitted_seqno;
2919 smp_store_mb(engine->last_submitted_seqno, request->seqno);
2920 list_add_tail(&request->list, &engine->request_list);
2922 /* Record the position of the start of the request so that
2923 * should we detect the updated seqno part-way through the
2924 * GPU processing the request, we never over-estimate the
2925 * position of the head.
2927 request->postfix = intel_ring_get_tail(ringbuf);
2929 if (i915.enable_execlists)
2930 ret = engine->emit_request(request);
2932 ret = engine->add_request(request);
2934 request->tail = intel_ring_get_tail(ringbuf);
2936 /* Not allowed to fail! */
2937 WARN(ret, "emit|add_request failed: %d!\n", ret);
2938 /* Sanity check that the reserved size was large enough. */
2939 ret = intel_ring_get_tail(ringbuf) - request_start;
2941 ret += ringbuf->size;
2942 WARN_ONCE(ret > reserved_tail,
2943 "Not enough space reserved (%d bytes) "
2944 "for adding the request (%d bytes)\n",
2945 reserved_tail, ret);
2947 i915_gem_mark_busy(engine);
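/* Hang/ban bookkeeping for the helpers below: a context that was already
 * banned stays banned, and a context that hangs again within its
 * ban_period_seconds window is banned for "hanging too fast". Innocent
 * contexts merely have their pending batch count bumped.
 */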
2950 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2952 unsigned long elapsed;
2954 if (ctx->hang_stats.banned)
2957 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2958 if (ctx->hang_stats.ban_period_seconds &&
2959 elapsed <= ctx->hang_stats.ban_period_seconds) {
2960 DRM_DEBUG("context hanging too fast, banning!\n");
2967 static void i915_set_reset_status(struct i915_gem_context *ctx,
2970 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2973 hs->banned = i915_context_is_banned(ctx);
2975 hs->guilty_ts = get_seconds();
2977 hs->batch_pending++;
2981 void i915_gem_request_free(struct kref *req_ref)
2983 struct drm_i915_gem_request *req = container_of(req_ref,
2985 kmem_cache_free(req->i915->requests, req);
2989 __i915_gem_request_alloc(struct intel_engine_cs *engine,
2990 struct i915_gem_context *ctx,
2991 struct drm_i915_gem_request **req_out)
2993 struct drm_i915_private *dev_priv = engine->i915;
2994 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
2995 struct drm_i915_gem_request *req;
3003 /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
3004 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
3007 ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
3011 req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
3015 ret = i915_gem_get_seqno(engine->i915, &req->seqno);
3019 kref_init(&req->ref);
3020 req->i915 = dev_priv;
3021 req->engine = engine;
3023 i915_gem_context_reference(req->ctx);
3026 * Reserve space in the ring buffer for all the commands required to
3027 * eventually emit this request. This is to guarantee that the
3028 * i915_add_request() call can't fail. Note that the reserve may need
3029 * to be redone if the request is not actually submitted straight
3030 * away, e.g. because a GPU scheduler has deferred it.
3032 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
3034 if (i915.enable_execlists)
3035 ret = intel_logical_ring_alloc_request_extras(req);
3037 ret = intel_ring_alloc_request_extras(req);
3045 i915_gem_context_unreference(ctx);
3047 kmem_cache_free(dev_priv->requests, req);
3052 * i915_gem_request_alloc - allocate a request structure
3054 * @engine: engine that we wish to issue the request on.
3055 * @ctx: context that the request will be associated with.
3056 * This can be NULL if the request is not directly related to
3057 * any specific user context, in which case this function will
3058 * choose an appropriate context to use.
3060 * Returns a pointer to the allocated request if successful,
3061 * or an error code if not.
3063 struct drm_i915_gem_request *
3064 i915_gem_request_alloc(struct intel_engine_cs *engine,
3065 struct i915_gem_context *ctx)
3067 struct drm_i915_gem_request *req;
3071 ctx = engine->i915->kernel_context;
3072 err = __i915_gem_request_alloc(engine, ctx, &req);
3073 return err ? ERR_PTR(err) : req;
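/* A rough sketch of the request lifecycle built on the helper above
 * (illustrative only; error handling trimmed):
 *
 *	req = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... emit the commands for this request ...
 *	i915_add_request(req);
 *
 * The ring space reserved at allocation time guarantees that the final
 * i915_add_request() step cannot fail.
 */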
3076 struct drm_i915_gem_request *
3077 i915_gem_find_active_request(struct intel_engine_cs *engine)
3079 struct drm_i915_gem_request *request;
3081 /* We are called by the error capture and reset at a random
3082 * point in time. In particular, note that neither is crucially
3083 * ordered with an interrupt. After a hang, the GPU is dead and we
3084 * assume that no more writes can happen (we waited long enough for
3085 * all writes that were in transaction to be flushed) - adding an
3086 * extra delay for a recent interrupt is pointless. Hence, we do
3087 * not need an engine->irq_seqno_barrier() before the seqno reads.
3089 list_for_each_entry(request, &engine->request_list, list) {
3090 if (i915_gem_request_completed(request))
3099 static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
3101 struct drm_i915_gem_request *request;
3104 request = i915_gem_find_active_request(engine);
3105 if (request == NULL)
3108 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
3110 i915_set_reset_status(request->ctx, ring_hung);
3111 list_for_each_entry_continue(request, &engine->request_list, list)
3112 i915_set_reset_status(request->ctx, false);
3115 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
3117 struct intel_ringbuffer *buffer;
3119 while (!list_empty(&engine->active_list)) {
3120 struct drm_i915_gem_object *obj;
3122 obj = list_first_entry(&engine->active_list,
3123 struct drm_i915_gem_object,
3124 engine_list[engine->id]);
3126 i915_gem_object_retire__read(obj, engine->id);
3130 * Clear the execlists queue up before freeing the requests, as those
3131 * are the ones that keep the context and ringbuffer backing objects
3135 if (i915.enable_execlists) {
3136 /* Ensure irq handler finishes or is cancelled. */
3137 tasklet_kill(&engine->irq_tasklet);
3139 intel_execlists_cancel_requests(engine);
3143 * We must free the requests after all the corresponding objects have
3144 * been moved off active lists. Which is the same order as the normal
3145 * retire_requests function does. This is important if objects hold
3146 * implicit references on things like e.g. ppgtt address spaces through
3149 while (!list_empty(&engine->request_list)) {
3150 struct drm_i915_gem_request *request;
3152 request = list_first_entry(&engine->request_list,
3153 struct drm_i915_gem_request,
3156 i915_gem_request_retire(request);
3159 /* Having flushed all requests from all queues, we know that all
3160 * ringbuffers must now be empty. However, since we do not reclaim
3161 * all space when retiring the request (to prevent HEADs colliding
3162 * with rapid ringbuffer wraparound) the amount of available space
3163 * upon reset is less than when we start. Do one more pass over
3164 * all the ringbuffers to reset last_retired_head.
3166 list_for_each_entry(buffer, &engine->buffers, link) {
3167 buffer->last_retired_head = buffer->tail;
3168 intel_ring_update_space(buffer);
3171 intel_ring_init_seqno(engine, engine->last_submitted_seqno);
3173 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
3176 void i915_gem_reset(struct drm_device *dev)
3178 struct drm_i915_private *dev_priv = to_i915(dev);
3179 struct intel_engine_cs *engine;
3182 * Before we free the objects from the requests, we need to inspect
3183 * them for finding the guilty party. As the requests only borrow
3184 * their reference to the objects, the inspection must be done first.
3186 for_each_engine(engine, dev_priv)
3187 i915_gem_reset_engine_status(engine);
3189 for_each_engine(engine, dev_priv)
3190 i915_gem_reset_engine_cleanup(engine);
3191 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
3193 i915_gem_context_reset(dev);
3195 i915_gem_restore_fences(dev);
3197 WARN_ON(i915_verify_lists(dev));
3201 * This function clears the request list as sequence numbers are passed.
3202 * @engine: engine to retire requests on
3205 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
3207 WARN_ON(i915_verify_lists(engine->dev));
3209 /* Retire requests first as we use it above for the early return.
3210 * If we retire requests last, we may use a later seqno and so clear
3211 * the requests lists without clearing the active list, leading to confusion.
3214 while (!list_empty(&engine->request_list)) {
3215 struct drm_i915_gem_request *request;
3217 request = list_first_entry(&engine->request_list,
3218 struct drm_i915_gem_request,
3221 if (!i915_gem_request_completed(request))
3224 i915_gem_request_retire(request);
3227 /* Move any buffers on the active list that are no longer referenced
3228 * by the ringbuffer to the flushing/inactive lists as appropriate,
3229 * before we free the context associated with the requests.
3231 while (!list_empty(&engine->active_list)) {
3232 struct drm_i915_gem_object *obj;
3234 obj = list_first_entry(&engine->active_list,
3235 struct drm_i915_gem_object,
3236 engine_list[engine->id]);
3238 if (!list_empty(&obj->last_read_req[engine->id]->list))
3241 i915_gem_object_retire__read(obj, engine->id);
3244 WARN_ON(i915_verify_lists(engine->dev));
3247 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
3249 struct intel_engine_cs *engine;
3251 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3253 if (dev_priv->gt.active_engines == 0)
3256 GEM_BUG_ON(!dev_priv->gt.awake);
3258 for_each_engine(engine, dev_priv) {
3259 i915_gem_retire_requests_ring(engine);
3260 if (list_empty(&engine->request_list))
3261 dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
3264 if (dev_priv->gt.active_engines == 0)
3265 queue_delayed_work(dev_priv->wq,
3266 &dev_priv->gt.idle_work,
3267 msecs_to_jiffies(100));
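/* The two delayed works cooperate: retire_work (below) re-queues itself
 * roughly once a second for as long as the GT is awake, and once every
 * engine's request list has drained the idle_work handler drops the
 * runtime-pm wakeref taken in i915_gem_mark_busy().
 */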
3271 i915_gem_retire_work_handler(struct work_struct *work)
3273 struct drm_i915_private *dev_priv =
3274 container_of(work, typeof(*dev_priv), gt.retire_work.work);
3275 struct drm_device *dev = &dev_priv->drm;
3277 /* Come back later if the device is busy... */
3278 if (mutex_trylock(&dev->struct_mutex)) {
3279 i915_gem_retire_requests(dev_priv);
3280 mutex_unlock(&dev->struct_mutex);
3283 /* Keep the retire handler running until we are finally idle.
3284 * We do not need to do this test under locking as in the worst-case
3285 * we queue the retire worker once too often.
3287 if (READ_ONCE(dev_priv->gt.awake))
3288 queue_delayed_work(dev_priv->wq,
3289 &dev_priv->gt.retire_work,
3290 round_jiffies_up_relative(HZ));
3294 i915_gem_idle_work_handler(struct work_struct *work)
3296 struct drm_i915_private *dev_priv =
3297 container_of(work, typeof(*dev_priv), gt.idle_work.work);
3298 struct drm_device *dev = &dev_priv->drm;
3299 struct intel_engine_cs *engine;
3300 unsigned int stuck_engines;
3301 bool rearm_hangcheck;
3303 if (!READ_ONCE(dev_priv->gt.awake))
3306 if (READ_ONCE(dev_priv->gt.active_engines))
3310 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3312 if (!mutex_trylock(&dev->struct_mutex)) {
3313 /* Currently busy, come back later */
3314 mod_delayed_work(dev_priv->wq,
3315 &dev_priv->gt.idle_work,
3316 msecs_to_jiffies(50));
3320 if (dev_priv->gt.active_engines)
3323 for_each_engine(engine, dev_priv)
3324 i915_gem_batch_pool_fini(&engine->batch_pool);
3326 GEM_BUG_ON(!dev_priv->gt.awake);
3327 dev_priv->gt.awake = false;
3328 rearm_hangcheck = false;
3330 stuck_engines = intel_kick_waiters(dev_priv);
3331 if (unlikely(stuck_engines)) {
3332 DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
3333 dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
3336 if (INTEL_GEN(dev_priv) >= 6)
3337 gen6_rps_idle(dev_priv);
3338 intel_runtime_pm_put(dev_priv);
3340 mutex_unlock(&dev->struct_mutex);
3343 if (rearm_hangcheck) {
3344 GEM_BUG_ON(!dev_priv->gt.awake);
3345 i915_queue_hangcheck(dev_priv);
3350 * Ensures that an object will eventually get non-busy by flushing any required
3351 * write domains, emitting any outstanding lazy request and retiring any
3352 * completed requests.
3353 * @obj: object to flush
3356 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3363 for (i = 0; i < I915_NUM_ENGINES; i++) {
3364 struct drm_i915_gem_request *req;
3366 req = obj->last_read_req[i];
3370 if (i915_gem_request_completed(req))
3371 i915_gem_object_retire__read(obj, i);
3378 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3379 * @dev: drm device pointer
3380 * @data: ioctl data blob
3381 * @file: drm file pointer
3383 * Returns 0 if successful, else an error is returned with the remaining time in
3384 * the timeout parameter.
3385 * -ETIME: object is still busy after timeout
3386 * -ERESTARTSYS: signal interrupted the wait
3387 * -ENOENT: object doesn't exist
3388 * Also possible, but rare:
3389 * -EAGAIN: GPU wedged
3391 * -ENODEV: Internal IRQ fail
3392 * -E?: The add request failed
3394 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3395 * non-zero timeout parameter the wait ioctl will wait for the given number of
3396 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3397 * without holding struct_mutex the object may become re-busied before this
3398 * function completes. A similar but shorter race condition exists in the busy ioctl.
3402 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3404 struct drm_i915_gem_wait *args = data;
3405 struct drm_i915_gem_object *obj;
3406 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3410 if (args->flags != 0)
3413 ret = i915_mutex_lock_interruptible(dev);
3417 obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
3418 if (&obj->base == NULL) {
3419 mutex_unlock(&dev->struct_mutex);
3423 /* Need to make sure the object gets inactive eventually. */
3424 ret = i915_gem_object_flush_active(obj);
3431 /* Do this after OLR check to make sure we make forward progress polling
3432 * on this IOCTL with a timeout == 0 (like busy ioctl)
3434 if (args->timeout_ns == 0) {
3439 drm_gem_object_unreference(&obj->base);
3441 for (i = 0; i < I915_NUM_ENGINES; i++) {
3442 if (obj->last_read_req[i] == NULL)
3445 req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3448 mutex_unlock(&dev->struct_mutex);
3450 for (i = 0; i < n; i++) {
3452 ret = __i915_wait_request(req[i], true,
3453 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3454 to_rps_client(file));
3455 i915_gem_request_unreference(req[i]);
3460 drm_gem_object_unreference(&obj->base);
3461 mutex_unlock(&dev->struct_mutex);
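/* Minimal userspace sketch for the wait ioctl handled above (illustrative
 * only, assuming a libdrm-style drmIoctl() wrapper):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On -ETIME the time remaining is passed back in wait.timeout_ns, and a
 * timeout of 0 turns the call into a non-blocking busy check.
 */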
3466 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3467 struct intel_engine_cs *to,
3468 struct drm_i915_gem_request *from_req,
3469 struct drm_i915_gem_request **to_req)
3471 struct intel_engine_cs *from;
3474 from = i915_gem_request_get_engine(from_req);
3478 if (i915_gem_request_completed(from_req))
3481 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
3482 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3483 ret = __i915_wait_request(from_req,
3484 i915->mm.interruptible,
3486 &i915->rps.semaphores);
3490 i915_gem_object_retire_request(obj, from_req);
3492 int idx = intel_ring_sync_index(from, to);
3493 u32 seqno = i915_gem_request_get_seqno(from_req);
3497 if (seqno <= from->semaphore.sync_seqno[idx])
3500 if (*to_req == NULL) {
3501 struct drm_i915_gem_request *req;
3503 req = i915_gem_request_alloc(to, NULL);
3505 return PTR_ERR(req);
3510 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3511 ret = to->semaphore.sync_to(*to_req, from, seqno);
3515 /* We use last_read_req because sync_to()
3516 * might have just caused seqno wrap under
3519 from->semaphore.sync_seqno[idx] =
3520 i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3527 * i915_gem_object_sync - sync an object to a ring.
3529 * @obj: object which may be in use on another ring.
3530 * @to: ring we wish to use the object on. May be NULL.
3531 * @to_req: request we wish to use the object for. See below.
3532 * This will be allocated and returned if a request is
3533 * required but not passed in.
3535 * This code is meant to abstract object synchronization with the GPU.
3536 * Calling with NULL implies synchronizing the object with the CPU
3537 * rather than a particular GPU ring. Conceptually we serialise writes
3538 * between engines inside the GPU. We only allow one engine to write
3539 * into a buffer at any time, but multiple readers. To ensure each has
3540 * a coherent view of memory, we must:
3542 * - If there is an outstanding write request to the object, the new
3543 * request must wait for it to complete (either CPU or in hw, requests
3544 * on the same ring will be naturally ordered).
3546 * - If we are a write request (pending_write_domain is set), the new
3547 * request must wait for outstanding read requests to complete.
3549 * For CPU synchronisation (NULL to) no request is required. For syncing with
3550 * rings to_req must be non-NULL. However, a request does not have to be
3551 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
3552 * request will be allocated automatically and returned through *to_req. Note
3553 * that it is not guaranteed that commands will be emitted (because the system
3554 * might already be idle). Hence there is no need to create a request that
3555 * might never have any work submitted. Note further that if a request is
3556 * returned in *to_req, it is the responsibility of the caller to submit
3557 * that request (after potentially adding more work to it).
3559 * Returns 0 if successful, else propagates up the lower layer error.
3562 i915_gem_object_sync(struct drm_i915_gem_object *obj,
3563 struct intel_engine_cs *to,
3564 struct drm_i915_gem_request **to_req)
3566 const bool readonly = obj->base.pending_write_domain == 0;
3567 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
3574 return i915_gem_object_wait_rendering(obj, readonly);
3578 if (obj->last_write_req)
3579 req[n++] = obj->last_write_req;
3581 for (i = 0; i < I915_NUM_ENGINES; i++)
3582 if (obj->last_read_req[i])
3583 req[n++] = obj->last_read_req[i];
3585 for (i = 0; i < n; i++) {
3586 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
3594 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3596 u32 old_write_domain, old_read_domains;
3598 /* Force a pagefault for domain tracking on next user access */
3599 i915_gem_release_mmap(obj);
3601 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3604 old_read_domains = obj->base.read_domains;
3605 old_write_domain = obj->base.write_domain;
3607 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3608 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3610 trace_i915_gem_object_change_domain(obj,
3615 static void __i915_vma_iounmap(struct i915_vma *vma)
3617 GEM_BUG_ON(vma->pin_count);
3619 if (vma->iomap == NULL)
3622 io_mapping_unmap(vma->iomap);
3626 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3628 struct drm_i915_gem_object *obj = vma->obj;
3629 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3632 if (list_empty(&vma->obj_link))
3635 if (!drm_mm_node_allocated(&vma->node)) {
3636 i915_gem_vma_destroy(vma);
3643 BUG_ON(obj->pages == NULL);
3646 ret = i915_gem_object_wait_rendering(obj, false);
3651 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3652 i915_gem_object_finish_gtt(obj);
3654 /* release the fence reg _after_ flushing */
3655 ret = i915_gem_object_put_fence(obj);
3659 __i915_vma_iounmap(vma);
3662 trace_i915_vma_unbind(vma);
3664 vma->vm->unbind_vma(vma);
3667 list_del_init(&vma->vm_link);
3669 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3670 obj->map_and_fenceable = false;
3671 } else if (vma->ggtt_view.pages) {
3672 sg_free_table(vma->ggtt_view.pages);
3673 kfree(vma->ggtt_view.pages);
3675 vma->ggtt_view.pages = NULL;
3678 drm_mm_remove_node(&vma->node);
3679 i915_gem_vma_destroy(vma);
3681 /* Since the unbound list is global, only move to that list if
3682 * no more VMAs exist. */
3683 if (list_empty(&obj->vma_list))
3684 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3686 /* And finally now the object is completely decoupled from this vma,
3687 * we can drop its hold on the backing storage and allow it to be
3688 * reaped by the shrinker.
3690 i915_gem_object_unpin_pages(obj);
3695 int i915_vma_unbind(struct i915_vma *vma)
3697 return __i915_vma_unbind(vma, true);
3700 int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3702 return __i915_vma_unbind(vma, false);
3705 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
3707 struct intel_engine_cs *engine;
3710 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3712 for_each_engine(engine, dev_priv) {
3713 if (engine->last_context == NULL)
3716 ret = intel_engine_idle(engine);
3721 WARN_ON(i915_verify_lists(dev));
3725 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3726 unsigned long cache_level)
3728 struct drm_mm_node *gtt_space = &vma->node;
3729 struct drm_mm_node *other;
3732 * On some machines we have to be careful when putting differing types
3733 * of snoopable memory together to avoid the prefetcher crossing memory
3734 * domains and dying. During vm initialisation, we decide whether or not
3735 * these constraints apply and set the drm_mm.color_adjust appropriately.
3738 if (vma->vm->mm.color_adjust == NULL)
3741 if (!drm_mm_node_allocated(gtt_space))
3744 if (list_empty(&gtt_space->node_list))
3747 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3748 if (other->allocated && !other->hole_follows && other->color != cache_level)
3751 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3752 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3759 * Finds free space in the GTT aperture and binds the object or a view of it
3761 * @obj: object to bind
3762 * @vm: address space to bind into
3763 * @ggtt_view: global gtt view if applicable
3764 * @alignment: requested alignment
3765 * @flags: mask of PIN_* flags to use
3767 static struct i915_vma *
3768 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3769 struct i915_address_space *vm,
3770 const struct i915_ggtt_view *ggtt_view,
3774 struct drm_device *dev = obj->base.dev;
3775 struct drm_i915_private *dev_priv = to_i915(dev);
3776 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3777 u32 fence_alignment, unfenced_alignment;
3778 u32 search_flag, alloc_flag;
3780 u64 size, fence_size;
3781 struct i915_vma *vma;
3784 if (i915_is_ggtt(vm)) {
3787 if (WARN_ON(!ggtt_view))
3788 return ERR_PTR(-EINVAL);
3790 view_size = i915_ggtt_view_size(obj, ggtt_view);
3792 fence_size = i915_gem_get_gtt_size(dev,
3795 fence_alignment = i915_gem_get_gtt_alignment(dev,
3799 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
3803 size = flags & PIN_MAPPABLE ? fence_size : view_size;
3805 fence_size = i915_gem_get_gtt_size(dev,
3808 fence_alignment = i915_gem_get_gtt_alignment(dev,
3812 unfenced_alignment =
3813 i915_gem_get_gtt_alignment(dev,
3817 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3820 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3822 if (flags & PIN_MAPPABLE)
3823 end = min_t(u64, end, ggtt->mappable_end);
3824 if (flags & PIN_ZONE_4G)
3825 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3828 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3830 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3831 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3832 ggtt_view ? ggtt_view->type : 0,
3834 return ERR_PTR(-EINVAL);
3837 /* If binding the object/GGTT view requires more space than the entire
3838 * aperture has, reject it early before evicting everything in a vain
3839 * attempt to find space.
3842 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
3843 ggtt_view ? ggtt_view->type : 0,
3845 flags & PIN_MAPPABLE ? "mappable" : "total",
3847 return ERR_PTR(-E2BIG);
3850 ret = i915_gem_object_get_pages(obj);
3852 return ERR_PTR(ret);
3854 i915_gem_object_pin_pages(obj);
3856 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3857 i915_gem_obj_lookup_or_create_vma(obj, vm);
3862 if (flags & PIN_OFFSET_FIXED) {
3863 uint64_t offset = flags & PIN_OFFSET_MASK;
3865 if (offset & (alignment - 1) || offset + size > end) {
3869 vma->node.start = offset;
3870 vma->node.size = size;
3871 vma->node.color = obj->cache_level;
3872 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3874 ret = i915_gem_evict_for_vma(vma);
3876 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3881 if (flags & PIN_HIGH) {
3882 search_flag = DRM_MM_SEARCH_BELOW;
3883 alloc_flag = DRM_MM_CREATE_TOP;
3885 search_flag = DRM_MM_SEARCH_DEFAULT;
3886 alloc_flag = DRM_MM_CREATE_DEFAULT;
3890 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3897 ret = i915_gem_evict_something(dev, vm, size, alignment,
3907 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3909 goto err_remove_node;
3912 trace_i915_vma_bind(vma, flags);
3913 ret = i915_vma_bind(vma, obj->cache_level, flags);
3915 goto err_remove_node;
3917 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3918 list_add_tail(&vma->vm_link, &vm->inactive_list);
3923 drm_mm_remove_node(&vma->node);
3925 i915_gem_vma_destroy(vma);
3928 i915_gem_object_unpin_pages(obj);
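/* i915_gem_clflush_object() below returns true only if any cachelines were
 * actually flushed, letting callers decide whether a chipset flush is also
 * required; snooped objects are merely marked cache_dirty unless the flush
 * is forced.
 */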
3933 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3936 /* If we don't have a page list set up, then we're not pinned
3937 * to GPU, and we can ignore the cache flush because it'll happen
3938 * again at bind time.
3940 if (obj->pages == NULL)
3944 * Stolen memory is always coherent with the GPU as it is explicitly
3945 * marked as wc by the system, or the system is cache-coherent.
3947 if (obj->stolen || obj->phys_handle)
3950 /* If the GPU is snooping the contents of the CPU cache,
3951 * we do not need to manually clear the CPU cache lines. However,
3952 * the caches are only snooped when the render cache is
3953 * flushed/invalidated. As we always have to emit invalidations
3954 * and flushes when moving into and out of the RENDER domain, correct
3955 * snooping behaviour occurs naturally as the result of our domain tracking.
3958 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3959 obj->cache_dirty = true;
3963 trace_i915_gem_object_clflush(obj);
3964 drm_clflush_sg(obj->pages);
3965 obj->cache_dirty = false;
3970 /** Flushes the GTT write domain for the object if it's dirty. */
3972 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3974 uint32_t old_write_domain;
3976 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3979 /* No actual flushing is required for the GTT write domain. Writes
3980 * to it immediately go to main memory as far as we know, so there's
3981 * no chipset flush. It also doesn't land in render cache.
3983 * However, we do have to enforce the order so that all writes through
3984 * the GTT land before any writes to the device, such as updates to the GATT itself.
3989 old_write_domain = obj->base.write_domain;
3990 obj->base.write_domain = 0;
3992 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3994 trace_i915_gem_object_change_domain(obj,
3995 obj->base.read_domains,
3999 /** Flushes the CPU write domain for the object if it's dirty. */
4001 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
4003 uint32_t old_write_domain;
4005 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
4008 if (i915_gem_clflush_object(obj, obj->pin_display))
4009 i915_gem_chipset_flush(to_i915(obj->base.dev));
4011 old_write_domain = obj->base.write_domain;
4012 obj->base.write_domain = 0;
4014 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
4016 trace_i915_gem_object_change_domain(obj,
4017 obj->base.read_domains,
4022 * Moves a single object to the GTT read, and possibly write domain.
4023 * @obj: object to act on
4024 * @write: ask for write access or read only
4026 * This function returns when the move is complete, including waiting on
4030 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
4032 struct drm_device *dev = obj->base.dev;
4033 struct drm_i915_private *dev_priv = to_i915(dev);
4034 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4035 uint32_t old_write_domain, old_read_domains;
4036 struct i915_vma *vma;
4039 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
4042 ret = i915_gem_object_wait_rendering(obj, !write);
4046 /* Flush and acquire obj->pages so that we are coherent through
4047 * direct access in memory with previous cached writes through
4048 * shmemfs and that our cache domain tracking remains valid.
4049 * For example, if the obj->filp was moved to swap without us
4050 * being notified and releasing the pages, we would mistakenly
4051 * continue to assume that the obj remained out of the CPU cached domain.
4054 ret = i915_gem_object_get_pages(obj);
4058 i915_gem_object_flush_cpu_write_domain(obj);
4060 /* Serialise direct access to this object with the barriers for
4061 * coherent writes from the GPU, by effectively invalidating the
4062 * GTT domain upon first access.
4064 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
4067 old_write_domain = obj->base.write_domain;
4068 old_read_domains = obj->base.read_domains;
4070 /* It should now be out of any other write domains, and we can update
4071 * the domain values for our changes.
4073 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
4074 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4076 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
4077 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
4081 trace_i915_gem_object_change_domain(obj,
4085 /* And bump the LRU for this access */
4086 vma = i915_gem_obj_to_ggtt(obj);
4087 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
4088 list_move_tail(&vma->vm_link,
4089 &ggtt->base.inactive_list);
4095 * Changes the cache-level of an object across all VMA.
4096 * @obj: object to act on
4097 * @cache_level: new cache level to set for the object
4099 * After this function returns, the object will be in the new cache-level
4100 * across all GTT and the contents of the backing storage will be coherent,
4101 * with respect to the new cache-level. In order to keep the backing storage
4102 * coherent for all users, we only allow a single cache level to be set
4103 * globally on the object and prevent it from being changed whilst the
4104 * hardware is reading from the object. That is if the object is currently
4105 * on the scanout it will be set to uncached (or equivalent display
4106 * cache coherency) and all non-MOCS GPU access will also be uncached so
4107 * that all direct access to the scanout remains coherent.
4109 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
4110 enum i915_cache_level cache_level)
4112 struct drm_device *dev = obj->base.dev;
4113 struct i915_vma *vma, *next;
4117 if (obj->cache_level == cache_level)
4120 /* Inspect the list of currently bound VMA and unbind any that would
4121 * be invalid given the new cache-level. This is principally to
4122 * catch the issue of the CS prefetch crossing page boundaries and
4123 * reading an invalid PTE on older architectures.
4125 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4126 if (!drm_mm_node_allocated(&vma->node))
4129 if (vma->pin_count) {
4130 DRM_DEBUG("can not change the cache level of pinned objects\n");
4134 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
4135 ret = i915_vma_unbind(vma);
4142 /* We can reuse the existing drm_mm nodes but need to change the
4143 * cache-level on the PTE. We could simply unbind them all and
4144 * rebind with the correct cache-level on next use. However since
4145 * we already have a valid slot, dma mapping, pages etc, we may as well
4146 * rewrite the PTE in the belief that doing so tramples upon less
4147 * state and so involves less work.
4150 /* Before we change the PTE, the GPU must not be accessing it.
4151 * If we wait upon the object, we know that all the bound
4152 * VMA are no longer active.
4154 ret = i915_gem_object_wait_rendering(obj, false);
4158 if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
4159 /* Access to snoopable pages through the GTT is
4160 * incoherent and on some machines causes a hard
4161 * lockup. Relinquish the CPU mmapping to force
4162 * userspace to refault in the pages and we can
4163 * then double check if the GTT mapping is still
4164 * valid for that pointer access.
4166 i915_gem_release_mmap(obj);
4168 /* As we no longer need a fence for GTT access,
4169 * we can relinquish it now (and so prevent having
4170 * to steal a fence from someone else on the next
4171 * fence request). Note GPU activity would have
4172 * dropped the fence as all snoopable access is
4173 * supposed to be linear.
4175 ret = i915_gem_object_put_fence(obj);
4179 /* We either have incoherent backing store and
4180 * so no GTT access or the architecture is fully
4181 * coherent. In such cases, existing GTT mmaps
4182 * ignore the cache bit in the PTE and we can
4183 * rewrite it without confusing the GPU or having
4184 * to force userspace to fault back in its mmaps.
4188 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4189 if (!drm_mm_node_allocated(&vma->node))
4192 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
4198 list_for_each_entry(vma, &obj->vma_list, obj_link)
4199 vma->node.color = cache_level;
4200 obj->cache_level = cache_level;
4203 /* Flush the dirty CPU caches to the backing storage so that the
4204 * object is now coherent at its new cache level (with respect
4205 * to the access domain).
4207 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
4208 if (i915_gem_clflush_object(obj, true))
4209 i915_gem_chipset_flush(to_i915(obj->base.dev));
4215 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
4216 struct drm_file *file)
4218 struct drm_i915_gem_caching *args = data;
4219 struct drm_i915_gem_object *obj;
4221 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4222 if (&obj->base == NULL)
4225 switch (obj->cache_level) {
4226 case I915_CACHE_LLC:
4227 case I915_CACHE_L3_LLC:
4228 args->caching = I915_CACHING_CACHED;
4232 args->caching = I915_CACHING_DISPLAY;
4236 args->caching = I915_CACHING_NONE;
4240 drm_gem_object_unreference_unlocked(&obj->base);
4244 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
4245 struct drm_file *file)
4247 struct drm_i915_private *dev_priv = to_i915(dev);
4248 struct drm_i915_gem_caching *args = data;
4249 struct drm_i915_gem_object *obj;
4250 enum i915_cache_level level;
4253 switch (args->caching) {
4254 case I915_CACHING_NONE:
4255 level = I915_CACHE_NONE;
4257 case I915_CACHING_CACHED:
4259 * Due to a HW issue on BXT A stepping, GPU stores via a
4260 * snooped mapping may leave stale data in a corresponding CPU
4261 * cacheline, whereas normally such cachelines would get
4264 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
4267 level = I915_CACHE_LLC;
4269 case I915_CACHING_DISPLAY:
4270 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
4276 intel_runtime_pm_get(dev_priv);
4278 ret = i915_mutex_lock_interruptible(dev);
4282 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4283 if (&obj->base == NULL) {
4288 ret = i915_gem_object_set_cache_level(obj, level);
4290 drm_gem_object_unreference(&obj->base);
4292 mutex_unlock(&dev->struct_mutex);
4294 intel_runtime_pm_put(dev_priv);
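/* Minimal userspace sketch for the caching ioctls above (illustrative only):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * The current level can be read back with DRM_IOCTL_I915_GEM_GET_CACHING,
 * which reports one of I915_CACHING_NONE/CACHED/DISPLAY.
 */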
4300 * Prepare buffer for display plane (scanout, cursors, etc).
4301 * Can be called from an uninterruptible phase (modesetting) and allows
4302 * any flushes to be pipelined (for pageflips).
4305 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4307 const struct i915_ggtt_view *view)
4309 u32 old_read_domains, old_write_domain;
4312 /* Mark the pin_display early so that we account for the
4313 * display coherency whilst setting up the cache domains.
4317 /* The display engine is not coherent with the LLC cache on gen6. As
4318 * a result, we make sure that the pinning that is about to occur is
4319 * done with uncached PTEs. This is the lowest common denominator for all chipsets.
4322 * However for gen6+, we could do better by using the GFDT bit instead
4323 * of uncaching, which would allow us to flush all the LLC-cached data
4324 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4326 ret = i915_gem_object_set_cache_level(obj,
4327 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
4329 goto err_unpin_display;
4331 /* As the user may map the buffer once pinned in the display plane
4332 * (e.g. libkms for the bootup splash), we have to ensure that we
4333 * always use map_and_fenceable for all scanout buffers.
4335 ret = i915_gem_object_ggtt_pin(obj, view, alignment,
4336 view->type == I915_GGTT_VIEW_NORMAL ?
4339 goto err_unpin_display;
4341 i915_gem_object_flush_cpu_write_domain(obj);
4343 old_write_domain = obj->base.write_domain;
4344 old_read_domains = obj->base.read_domains;
4346 /* It should now be out of any other write domains, and we can update
4347 * the domain values for our changes.
4349 obj->base.write_domain = 0;
4350 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4352 trace_i915_gem_object_change_domain(obj,
4364 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4365 const struct i915_ggtt_view *view)
4367 if (WARN_ON(obj->pin_display == 0))
4370 i915_gem_object_ggtt_unpin_view(obj, view);
4376 * Moves a single object to the CPU read, and possibly write domain.
4377 * @obj: object to act on
4378 * @write: requesting write or read-only access
4380 * This function returns when the move is complete, including waiting on
4384 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4386 uint32_t old_write_domain, old_read_domains;
4389 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4392 ret = i915_gem_object_wait_rendering(obj, !write);
4396 i915_gem_object_flush_gtt_write_domain(obj);
4398 old_write_domain = obj->base.write_domain;
4399 old_read_domains = obj->base.read_domains;
4401 /* Flush the CPU cache if it's still invalid. */
4402 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4403 i915_gem_clflush_object(obj, false);
4405 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4408 /* It should now be out of any other write domains, and we can update
4409 * the domain values for our changes.
4411 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4413 /* If we're writing through the CPU, then the GPU read domains will
4414 * need to be invalidated at next use.
4417 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4418 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4421 trace_i915_gem_object_change_domain(obj,
4428 /* Throttle our rendering by waiting until the ring has completed our requests
4429 * emitted over 20 msec ago.
4431 * Note that if we were to use the current jiffies each time around the loop,
4432 * we wouldn't escape the function with any frames outstanding if the time to
4433 * render a frame was over 20ms.
4435 * This should get us reasonable parallelism between CPU and GPU but also
4436 * relatively low latency when blocking on a particular request to finish.
4439 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4441 struct drm_i915_private *dev_priv = to_i915(dev);
4442 struct drm_i915_file_private *file_priv = file->driver_priv;
4443 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4444 struct drm_i915_gem_request *request, *target = NULL;
4447 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4451 /* ABI: return -EIO if already wedged */
4452 if (i915_terminally_wedged(&dev_priv->gpu_error))
4455 spin_lock(&file_priv->mm.lock);
4456 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4457 if (time_after_eq(request->emitted_jiffies, recent_enough))
4461 * Note that the request might not have been submitted yet,
4462 * in which case emitted_jiffies will be zero.
4464 if (!request->emitted_jiffies)
4470 i915_gem_request_reference(target);
4471 spin_unlock(&file_priv->mm.lock);
4476 ret = __i915_wait_request(target, true, NULL, NULL);
4477 i915_gem_request_unreference(target);
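/* i915_vma_misplaced() below checks whether an existing binding already
 * satisfies the caller's constraints: the requested alignment, PIN_MAPPABLE,
 * and any PIN_OFFSET_BIAS or PIN_OFFSET_FIXED placement requirement.
 */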
4483 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4485 struct drm_i915_gem_object *obj = vma->obj;
4488 vma->node.start & (alignment - 1))
4491 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4494 if (flags & PIN_OFFSET_BIAS &&
4495 vma->node.start < (flags & PIN_OFFSET_MASK))
4498 if (flags & PIN_OFFSET_FIXED &&
4499 vma->node.start != (flags & PIN_OFFSET_MASK))
4505 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
4507 struct drm_i915_gem_object *obj = vma->obj;
4508 bool mappable, fenceable;
4509 u32 fence_size, fence_alignment;
4511 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4514 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4519 fenceable = (vma->node.size == fence_size &&
4520 (vma->node.start & (fence_alignment - 1)) == 0);
4522 mappable = (vma->node.start + fence_size <=
4523 to_i915(obj->base.dev)->ggtt.mappable_end);
4525 obj->map_and_fenceable = mappable && fenceable;
4529 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4530 struct i915_address_space *vm,
4531 const struct i915_ggtt_view *ggtt_view,
4535 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4536 struct i915_vma *vma;
4540 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4543 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4546 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4549 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4552 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4553 i915_gem_obj_to_vma(obj, vm);
4556 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4559 if (i915_vma_misplaced(vma, alignment, flags)) {
4560 WARN(vma->pin_count,
4561 "bo is already pinned in %s with incorrect alignment:"
4562 " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
4563 " obj->map_and_fenceable=%d\n",
4564 ggtt_view ? "ggtt" : "ppgtt",
4565 upper_32_bits(vma->node.start),
4566 lower_32_bits(vma->node.start),
4568 !!(flags & PIN_MAPPABLE),
4569 obj->map_and_fenceable);
4570 ret = i915_vma_unbind(vma);
4578 bound = vma ? vma->bound : 0;
4579 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4580 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4583 return PTR_ERR(vma);
4585 ret = i915_vma_bind(vma, obj->cache_level, flags);
4590 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4591 (bound ^ vma->bound) & GLOBAL_BIND) {
4592 __i915_vma_set_map_and_fenceable(vma);
4593 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4601 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4602 struct i915_address_space *vm,
4606 return i915_gem_object_do_pin(obj, vm,
4607 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4612 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4613 const struct i915_ggtt_view *view,
4617 struct drm_device *dev = obj->base.dev;
4618 struct drm_i915_private *dev_priv = to_i915(dev);
4619 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4623 return i915_gem_object_do_pin(obj, &ggtt->base, view,
4624 alignment, flags | PIN_GLOBAL);
4628 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4629 const struct i915_ggtt_view *view)
4631 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4633 WARN_ON(vma->pin_count == 0);
4634 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4640 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4641 struct drm_file *file)
4643 struct drm_i915_gem_busy *args = data;
4644 struct drm_i915_gem_object *obj;
4647 ret = i915_mutex_lock_interruptible(dev);
4651 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
4652 if (&obj->base == NULL) {
4657 /* Count all active objects as busy, even if they are currently not used
4658 * by the gpu. Users of this interface expect objects to eventually
4659 * become non-busy without any further actions, therefore emit any
4660 * necessary flushes here.
4662 ret = i915_gem_object_flush_active(obj);
4670 for (i = 0; i < I915_NUM_ENGINES; i++) {
4671 struct drm_i915_gem_request *req;
4673 req = obj->last_read_req[i];
4675 args->busy |= 1 << (16 + req->engine->exec_id);
4677 if (obj->last_write_req)
4678 args->busy |= obj->last_write_req->engine->exec_id;
4682 drm_gem_object_unreference(&obj->base);
4684 mutex_unlock(&dev->struct_mutex);
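/* Note on the busy encoding reported above: the engine currently writing to
 * the object (if any) is reported via its exec_id in the low 16 bits, while
 * every engine still reading from it contributes a bit in the upper half,
 * i.e. 1 << (16 + exec_id).
 */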
4689 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4690 struct drm_file *file_priv)
4692 return i915_gem_ring_throttle(dev, file_priv);
4696 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4697 struct drm_file *file_priv)
4699 struct drm_i915_private *dev_priv = to_i915(dev);
4700 struct drm_i915_gem_madvise *args = data;
4701 struct drm_i915_gem_object *obj;
4704 switch (args->madv) {
4705 case I915_MADV_DONTNEED:
4706 case I915_MADV_WILLNEED:
4712 ret = i915_mutex_lock_interruptible(dev);
4716 obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
4717 if (&obj->base == NULL) {
4722 if (i915_gem_obj_is_pinned(obj)) {
4727 if (obj->pages &&
4728 obj->tiling_mode != I915_TILING_NONE &&
4729 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4730 if (obj->madv == I915_MADV_WILLNEED)
4731 i915_gem_object_unpin_pages(obj);
4732 if (args->madv == I915_MADV_WILLNEED)
4733 i915_gem_object_pin_pages(obj);
4736 if (obj->madv != __I915_MADV_PURGED)
4737 obj->madv = args->madv;
4739 /* if the object is no longer attached, discard its backing storage */
4740 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4741 i915_gem_object_truncate(obj);
4743 args->retained = obj->madv != __I915_MADV_PURGED;
4746 drm_gem_object_unreference(&obj->base);
4748 mutex_unlock(&dev->struct_mutex);
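/*
 * Userspace-side sketch of this ioctl (illustrative only, using the uapi
 * definitions from i915_drm.h and libdrm's drmIoctl): mark a cached buffer
 * purgeable and check whether its backing storage survived.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		... the pages were already purged, treat as a fresh buffer ...
 */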
4752 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4753 const struct drm_i915_gem_object_ops *ops)
4757 INIT_LIST_HEAD(&obj->global_list);
4758 for (i = 0; i < I915_NUM_ENGINES; i++)
4759 INIT_LIST_HEAD(&obj->engine_list[i]);
4760 INIT_LIST_HEAD(&obj->obj_exec_link);
4761 INIT_LIST_HEAD(&obj->vma_list);
4762 INIT_LIST_HEAD(&obj->batch_pool_link);
4766 obj->fence_reg = I915_FENCE_REG_NONE;
4767 obj->madv = I915_MADV_WILLNEED;
4769 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4772 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4773 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4774 .get_pages = i915_gem_object_get_pages_gtt,
4775 .put_pages = i915_gem_object_put_pages_gtt,
4778 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
4781 struct drm_i915_gem_object *obj;
4782 struct address_space *mapping;
4786 obj = i915_gem_object_alloc(dev);
4788 return ERR_PTR(-ENOMEM);
4790 ret = drm_gem_object_init(dev, &obj->base, size);
4794 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4795 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4796 /* 965gm cannot relocate objects above 4GiB. */
4797 mask &= ~__GFP_HIGHMEM;
4798 mask |= __GFP_DMA32;
4801 mapping = obj->base.filp->f_mapping;
4802 mapping_set_gfp_mask(mapping, mask);
4804 i915_gem_object_init(obj, &i915_gem_object_ops);
4806 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4807 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4810 /* On some devices, we can have the GPU use the LLC (the CPU
4811 * cache) for about a 10% performance improvement
4812 * compared to uncached. Graphics requests other than
4813 * display scanout are coherent with the CPU in
4814 * accessing this cache. This means in this mode we
4815 * don't need to clflush on the CPU side, and on the
4816 * GPU side we only need to flush internal caches to
4817 * get data visible to the CPU.
4819 * However, we maintain the display planes as UC, and so
4820 * need to rebind when first used as such.
4822 obj->cache_level = I915_CACHE_LLC;
4824 obj->cache_level = I915_CACHE_NONE;
4826 trace_i915_gem_object_create(obj);
4831 i915_gem_object_free(obj);
4833 return ERR_PTR(ret);
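/*
 * Typical caller pattern for i915_gem_object_create() (an illustrative
 * sketch, not a specific call site): the function returns an ERR_PTR on
 * failure, and the reference is dropped under struct_mutex when done.
 *
 *	obj = i915_gem_object_create(dev, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	drm_gem_object_unreference(&obj->base);
 */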
4836 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4838 /* If we are the last user of the backing storage (be it shmemfs
4839 * pages or stolen etc), we know that the pages are going to be
4840 * immediately released. In this case, we can then skip copying
4841 * back the contents from the GPU.
4844 if (obj->madv != I915_MADV_WILLNEED)
4847 if (obj->base.filp == NULL)
4850 /* At first glance, this looks racy, but then again so would be
4851 * userspace racing mmap against close. However, the first external
4852 * reference to the filp can only be obtained through the
4853 * i915_gem_mmap_ioctl() which safeguards us against the user
4854 * acquiring such a reference whilst we are in the middle of
4855 * freeing the object.
4857 return atomic_long_read(&obj->base.filp->f_count) == 1;
4860 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4862 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4863 struct drm_device *dev = obj->base.dev;
4864 struct drm_i915_private *dev_priv = to_i915(dev);
4865 struct i915_vma *vma, *next;
4867 intel_runtime_pm_get(dev_priv);
4869 trace_i915_gem_object_destroy(obj);
4871 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4875 ret = i915_vma_unbind(vma);
4876 if (WARN_ON(ret == -ERESTARTSYS)) {
4877 bool was_interruptible;
4879 was_interruptible = dev_priv->mm.interruptible;
4880 dev_priv->mm.interruptible = false;
4882 WARN_ON(i915_vma_unbind(vma));
4884 dev_priv->mm.interruptible = was_interruptible;
4888 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4889 * before progressing. */
4891 i915_gem_object_unpin_pages(obj);
4893 WARN_ON(obj->frontbuffer_bits);
4895 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4896 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4897 obj->tiling_mode != I915_TILING_NONE)
4898 i915_gem_object_unpin_pages(obj);
4900 if (WARN_ON(obj->pages_pin_count))
4901 obj->pages_pin_count = 0;
4902 if (discard_backing_storage(obj))
4903 obj->madv = I915_MADV_DONTNEED;
4904 i915_gem_object_put_pages(obj);
4905 i915_gem_object_free_mmap_offset(obj);
4909 if (obj->base.import_attach)
4910 drm_prime_gem_destroy(&obj->base, NULL);
4912 if (obj->ops->release)
4913 obj->ops->release(obj);
4915 drm_gem_object_release(&obj->base);
4916 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4919 i915_gem_object_free(obj);
4921 intel_runtime_pm_put(dev_priv);
4924 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4925 struct i915_address_space *vm)
4927 struct i915_vma *vma;
4928 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4929 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4936 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4937 const struct i915_ggtt_view *view)
4939 struct i915_vma *vma;
4943 list_for_each_entry(vma, &obj->vma_list, obj_link)
4944 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4949 void i915_gem_vma_destroy(struct i915_vma *vma)
4951 WARN_ON(vma->node.allocated);
4953 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4954 if (!list_empty(&vma->exec_list))
4958 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
4960 list_del(&vma->obj_link);
4962 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
4966 i915_gem_stop_engines(struct drm_device *dev)
4968 struct drm_i915_private *dev_priv = to_i915(dev);
4969 struct intel_engine_cs *engine;
4971 for_each_engine(engine, dev_priv)
4972 dev_priv->gt.stop_engine(engine);
4976 i915_gem_suspend(struct drm_device *dev)
4978 struct drm_i915_private *dev_priv = to_i915(dev);
4981 mutex_lock(&dev->struct_mutex);
4982 ret = i915_gem_wait_for_idle(dev_priv);
4986 i915_gem_retire_requests(dev_priv);
4988 i915_gem_stop_engines(dev);
4989 i915_gem_context_lost(dev_priv);
4990 mutex_unlock(&dev->struct_mutex);
4992 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4993 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4994 flush_delayed_work(&dev_priv->gt.idle_work);
4996 /* Assert that we successfully flushed all the work and
4997 * reset the GPU back to its idle, low power state.
4999 WARN_ON(dev_priv->gt.awake);
5004 mutex_unlock(&dev->struct_mutex);
5008 void i915_gem_init_swizzling(struct drm_device *dev)
5010 struct drm_i915_private *dev_priv = to_i915(dev);
5012 if (INTEL_INFO(dev)->gen < 5 ||
5013 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
5016 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
5017 DISP_TILE_SURFACE_SWIZZLING);
5022 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
5024 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
5025 else if (IS_GEN7(dev))
5026 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
5027 else if (IS_GEN8(dev))
5028 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
5033 static void init_unused_ring(struct drm_device *dev, u32 base)
5035 struct drm_i915_private *dev_priv = to_i915(dev);
5037 I915_WRITE(RING_CTL(base), 0);
5038 I915_WRITE(RING_HEAD(base), 0);
5039 I915_WRITE(RING_TAIL(base), 0);
5040 I915_WRITE(RING_START(base), 0);
5043 static void init_unused_rings(struct drm_device *dev)
5046 init_unused_ring(dev, PRB1_BASE);
5047 init_unused_ring(dev, SRB0_BASE);
5048 init_unused_ring(dev, SRB1_BASE);
5049 init_unused_ring(dev, SRB2_BASE);
5050 init_unused_ring(dev, SRB3_BASE);
5051 } else if (IS_GEN2(dev)) {
5052 init_unused_ring(dev, SRB0_BASE);
5053 init_unused_ring(dev, SRB1_BASE);
5054 } else if (IS_GEN3(dev)) {
5055 init_unused_ring(dev, PRB1_BASE);
5056 init_unused_ring(dev, PRB2_BASE);
5060 int i915_gem_init_engines(struct drm_device *dev)
5062 struct drm_i915_private *dev_priv = to_i915(dev);
5065 ret = intel_init_render_ring_buffer(dev);
5070 ret = intel_init_bsd_ring_buffer(dev);
5072 goto cleanup_render_ring;
5076 ret = intel_init_blt_ring_buffer(dev);
5078 goto cleanup_bsd_ring;
5081 if (HAS_VEBOX(dev)) {
5082 ret = intel_init_vebox_ring_buffer(dev);
5084 goto cleanup_blt_ring;
5087 if (HAS_BSD2(dev)) {
5088 ret = intel_init_bsd2_ring_buffer(dev);
5090 goto cleanup_vebox_ring;
5095 cleanup_vebox_ring:
5096 intel_cleanup_engine(&dev_priv->engine[VECS]);
5097 cleanup_blt_ring:
5098 intel_cleanup_engine(&dev_priv->engine[BCS]);
5099 cleanup_bsd_ring:
5100 intel_cleanup_engine(&dev_priv->engine[VCS]);
5101 cleanup_render_ring:
5102 intel_cleanup_engine(&dev_priv->engine[RCS]);
5108 i915_gem_init_hw(struct drm_device *dev)
5110 struct drm_i915_private *dev_priv = to_i915(dev);
5111 struct intel_engine_cs *engine;
5114 /* Double layer security blanket, see i915_gem_init() */
5115 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5117 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
5118 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
5120 if (IS_HASWELL(dev))
5121 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
5122 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
5124 if (HAS_PCH_NOP(dev)) {
5125 if (IS_IVYBRIDGE(dev)) {
5126 u32 temp = I915_READ(GEN7_MSG_CTL);
5127 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5128 I915_WRITE(GEN7_MSG_CTL, temp);
5129 } else if (INTEL_INFO(dev)->gen >= 7) {
5130 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5131 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5132 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5136 i915_gem_init_swizzling(dev);
5139 * At least 830 can leave some of the unused rings
5140 * "active" (ie. head != tail) after resume which
5141 * will prevent c3 entry. Make sure all unused rings
5142 * are totally idle.
5143 */
5144 init_unused_rings(dev);
5146 BUG_ON(!dev_priv->kernel_context);
5148 ret = i915_ppgtt_init_hw(dev);
5150 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
5154 /* Need to do basic initialisation of all rings first: */
5155 for_each_engine(engine, dev_priv) {
5156 ret = engine->init_hw(engine);
5161 intel_mocs_init_l3cc_table(dev);
5163 /* We can't enable contexts until all firmware is loaded */
5164 ret = intel_guc_setup(dev);
5169 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5173 int i915_gem_init(struct drm_device *dev)
5175 struct drm_i915_private *dev_priv = to_i915(dev);
5178 mutex_lock(&dev->struct_mutex);
5180 if (!i915.enable_execlists) {
5181 dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
5182 dev_priv->gt.init_engines = i915_gem_init_engines;
5183 dev_priv->gt.cleanup_engine = intel_cleanup_engine;
5184 dev_priv->gt.stop_engine = intel_stop_engine;
5186 dev_priv->gt.execbuf_submit = intel_execlists_submission;
5187 dev_priv->gt.init_engines = intel_logical_rings_init;
5188 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
5189 dev_priv->gt.stop_engine = intel_logical_ring_stop;
5192 /* This is just a security blanket to placate dragons.
5193 * On some systems, we very sporadically observe that the first TLBs
5194 * used by the CS may be stale, despite us poking the TLB reset. If
5195 * we hold the forcewake during initialisation these problems
5196 * just magically go away.
5198 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5200 i915_gem_init_userptr(dev_priv);
5201 i915_gem_init_ggtt(dev);
5203 ret = i915_gem_context_init(dev);
5207 ret = dev_priv->gt.init_engines(dev);
5211 ret = i915_gem_init_hw(dev);
5213 /* Allow ring initialisation to fail by marking the GPU as
5214 * wedged. But we only want to do this where the GPU is angry,
5215 * for all other failures, such as an allocation failure, bail.
5217 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5218 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
5223 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5224 mutex_unlock(&dev->struct_mutex);
5230 i915_gem_cleanup_engines(struct drm_device *dev)
5232 struct drm_i915_private *dev_priv = to_i915(dev);
5233 struct intel_engine_cs *engine;
5235 for_each_engine(engine, dev_priv)
5236 dev_priv->gt.cleanup_engine(engine);
5240 init_engine_lists(struct intel_engine_cs *engine)
5242 INIT_LIST_HEAD(&engine->active_list);
5243 INIT_LIST_HEAD(&engine->request_list);
5247 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5249 struct drm_device *dev = &dev_priv->drm;
5251 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5252 !IS_CHERRYVIEW(dev_priv))
5253 dev_priv->num_fence_regs = 32;
5254 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
5255 IS_I945GM(dev_priv) || IS_G33(dev_priv))
5256 dev_priv->num_fence_regs = 16;
5258 dev_priv->num_fence_regs = 8;
5260 if (intel_vgpu_active(dev_priv))
5261 dev_priv->num_fence_regs =
5262 I915_READ(vgtif_reg(avail_rs.fence_num));
5264 /* Initialize fence registers to zero */
5265 i915_gem_restore_fences(dev);
5267 i915_gem_detect_bit_6_swizzle(dev);
5271 i915_gem_load_init(struct drm_device *dev)
5273 struct drm_i915_private *dev_priv = to_i915(dev);
5277 kmem_cache_create("i915_gem_object",
5278 sizeof(struct drm_i915_gem_object), 0,
5282 kmem_cache_create("i915_gem_vma",
5283 sizeof(struct i915_vma), 0,
5286 dev_priv->requests =
5287 kmem_cache_create("i915_gem_request",
5288 sizeof(struct drm_i915_gem_request), 0,
5292 INIT_LIST_HEAD(&dev_priv->vm_list);
5293 INIT_LIST_HEAD(&dev_priv->context_list);
5294 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5295 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
5296 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5297 for (i = 0; i < I915_NUM_ENGINES; i++)
5298 init_engine_lists(&dev_priv->engine[i]);
5299 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5300 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5301 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5302 i915_gem_retire_work_handler);
5303 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5304 i915_gem_idle_work_handler);
5305 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5306 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5308 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5310 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5312 init_waitqueue_head(&dev_priv->pending_flip_queue);
5314 dev_priv->mm.interruptible = true;
5316 mutex_init(&dev_priv->fb_tracking.lock);
5319 void i915_gem_load_cleanup(struct drm_device *dev)
5321 struct drm_i915_private *dev_priv = to_i915(dev);
5323 kmem_cache_destroy(dev_priv->requests);
5324 kmem_cache_destroy(dev_priv->vmas);
5325 kmem_cache_destroy(dev_priv->objects);
5328 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5330 struct drm_i915_gem_object *obj;
5332 /* Called just before we write the hibernation image.
5334 * We need to update the domain tracking to reflect that the CPU
5335 * will be accessing all the pages to create and restore from the
5336 * hibernation, and so upon restoration those pages will be in the
5337 * CPU domain.
5339 * To make sure the hibernation image contains the latest state,
5340 * we update that state just before writing out the image.
5343 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5344 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
5345 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
5348 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5349 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
5350 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
5356 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5358 struct drm_i915_file_private *file_priv = file->driver_priv;
5360 /* Clean up our request list when the client is going away, so that
5361 * later retire_requests won't dereference our soon-to-be-gone
5362 * file_priv.
5363 */
5364 spin_lock(&file_priv->mm.lock);
5365 while (!list_empty(&file_priv->mm.request_list)) {
5366 struct drm_i915_gem_request *request;
5368 request = list_first_entry(&file_priv->mm.request_list,
5369 struct drm_i915_gem_request,
5371 list_del(&request->client_list);
5372 request->file_priv = NULL;
5374 spin_unlock(&file_priv->mm.lock);
5376 if (!list_empty(&file_priv->rps.link)) {
5377 spin_lock(&to_i915(dev)->rps.client_lock);
5378 list_del(&file_priv->rps.link);
5379 spin_unlock(&to_i915(dev)->rps.client_lock);
5383 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5385 struct drm_i915_file_private *file_priv;
5388 DRM_DEBUG_DRIVER("\n");
5390 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5394 file->driver_priv = file_priv;
5395 file_priv->dev_priv = to_i915(dev);
5396 file_priv->file = file;
5397 INIT_LIST_HEAD(&file_priv->rps.link);
5399 spin_lock_init(&file_priv->mm.lock);
5400 INIT_LIST_HEAD(&file_priv->mm.request_list);
5402 file_priv->bsd_ring = -1;
5404 ret = i915_gem_context_open(dev, file);
5412 * i915_gem_track_fb - update frontbuffer tracking
5413 * @old: current GEM buffer for the frontbuffer slots
5414 * @new: new GEM buffer for the frontbuffer slots
5415 * @frontbuffer_bits: bitmask of frontbuffer slots
5417 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5418 * from @old and setting them in @new. Both @old and @new can be NULL.
5420 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5421 struct drm_i915_gem_object *new,
5422 unsigned frontbuffer_bits)
5425 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5426 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5427 old->frontbuffer_bits &= ~frontbuffer_bits;
5431 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5432 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5433 new->frontbuffer_bits |= frontbuffer_bits;
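/*
 * Illustrative sketch of a plane update handing frontbuffer bits from the
 * old object to the new one during a flip (INTEL_FRONTBUFFER_PRIMARY() is
 * the per-pipe bit helper assumed from intel_drv.h):
 *
 *	i915_gem_track_fb(old_obj, new_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 */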
5437 /* All the new VM stuff */
5438 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5439 struct i915_address_space *vm)
5441 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5442 struct i915_vma *vma;
5444 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5446 list_for_each_entry(vma, &o->vma_list, obj_link) {
5448 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5451 return vma->node.start;
5454 WARN(1, "%s vma for this object not found.\n",
5455 i915_is_ggtt(vm) ? "global" : "ppgtt");
5459 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5460 const struct i915_ggtt_view *view)
5462 struct i915_vma *vma;
5464 list_for_each_entry(vma, &o->vma_list, obj_link)
5465 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
5466 return vma->node.start;
5468 WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
5472 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5473 struct i915_address_space *vm)
5475 struct i915_vma *vma;
5477 list_for_each_entry(vma, &o->vma_list, obj_link) {
5479 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5481 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5488 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5489 const struct i915_ggtt_view *view)
5491 struct i915_vma *vma;
5493 list_for_each_entry(vma, &o->vma_list, obj_link)
5495 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5496 drm_mm_node_allocated(&vma->node))
5502 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5504 struct i915_vma *vma;
5506 list_for_each_entry(vma, &o->vma_list, obj_link)
5507 if (drm_mm_node_allocated(&vma->node))
5513 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
5515 struct i915_vma *vma;
5517 GEM_BUG_ON(list_empty(&o->vma_list));
5519 list_for_each_entry(vma, &o->vma_list, obj_link) {
5521 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
5522 return vma->node.size;
5528 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5530 struct i915_vma *vma;
5531 list_for_each_entry(vma, &obj->vma_list, obj_link)
5532 if (vma->pin_count > 0)
5538 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5540 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
5544 /* Only default objects have per-page dirty tracking */
5545 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
5548 page = i915_gem_object_get_page(obj, n);
5549 set_page_dirty(page);
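/*
 * Illustrative sketch of patching an object's backing store with the
 * dirty-page helper above (assumes the caller has already pinned the
 * object's pages and that offset/src/len stay within a single page):
 *
 *	page = i915_gem_object_get_dirty_page(obj, offset >> PAGE_SHIFT);
 *	vaddr = kmap(page);
 *	memcpy(vaddr + offset_in_page(offset), src, len);
 *	kunmap(page);
 */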
5553 /* Allocate a new GEM object and fill it with the supplied data */
5554 struct drm_i915_gem_object *
5555 i915_gem_object_create_from_data(struct drm_device *dev,
5556 const void *data, size_t size)
5558 struct drm_i915_gem_object *obj;
5559 struct sg_table *sg;
5563 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
5567 ret = i915_gem_object_set_to_cpu_domain(obj, true);
5571 ret = i915_gem_object_get_pages(obj);
5575 i915_gem_object_pin_pages(obj);
5577 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
5578 obj->dirty = 1; /* Backing store is now out of date */
5579 i915_gem_object_unpin_pages(obj);
5581 if (WARN_ON(bytes != size)) {
5582 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
5590 drm_gem_object_unreference(&obj->base);
5591 return ERR_PTR(ret);
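/*
 * Intended use of i915_gem_object_create_from_data(): wrapping a firmware
 * blob in a GEM object. An illustrative sketch modelled on the firmware
 * loaders, where "fw" is a struct firmware obtained from request_firmware():
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */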