drm/i915: Add missing rpm wakelock to GGTT pread
drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

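/* The CPU cache is coherent with GPU access either when the platform has an
 * LLC shared between CPU and GPU, or when the object uses a snooped/cached
 * mapping, i.e. anything other than I915_CACHE_NONE.
 */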
static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
{
        return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return false;

        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;

        return obj->pin_display;
}

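/* Reserve a temporary node in the mappable (CPU visible) part of the GGTT so
 * that individual pages can be inserted and accessed through the aperture.
 */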
static int
insert_mappable_node(struct drm_i915_private *i915,
                     struct drm_mm_node *node, u32 size)
{
        memset(node, 0, sizeof(*node));
        return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
                                                   size, 0, 0, 0,
                                                   i915->ggtt.mappable_end,
                                                   DRM_MM_SEARCH_DEFAULT,
                                                   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
        drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
        int ret;

        if (!i915_reset_in_progress(error))
                return 0;

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
                                               !i915_reset_in_progress(error),
                                               10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
        } else {
                return 0;
        }
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        size_t pinned;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
                if (vma->pin_count)
                        pinned += vma->node.size;
        list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
                if (vma->pin_count)
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = ggtt->base.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
        char *vaddr = obj->phys_handle->vaddr;
        struct sg_table *st;
        struct scatterlist *sg;
        int i;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);

                put_page(page);
                vaddr += PAGE_SIZE;
        }

        i915_gem_chipset_flush(to_i915(obj->base.dev));

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return -ENOMEM;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_dma_address(sg) = obj->phys_handle->busaddr;
        sg_dma_len(sg) = obj->base.size;

        obj->pages = st;
        return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
        int ret;

        BUG_ON(obj->madv == __I915_MADV_PURGED);

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (WARN_ON(ret)) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }

        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;

        if (obj->dirty) {
                struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(vaddr, PAGE_SIZE);
                        memcpy(dst, vaddr, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
                obj->dirty = 0;
        }

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
        drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
        .release = i915_gem_object_release_phys,
};

int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        LIST_HEAD(still_in_list);
        int ret = 0;

        /* The vma will only be freed if it is marked as closed, and if we wait
         * upon rendering to the vma, we may unbind anything in the list.
         */
        while ((vma = list_first_entry_or_null(&obj->vma_list,
                                               struct i915_vma,
                                               obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
                ret = i915_vma_unbind(vma);
                if (ret)
                        break;
        }
        list_splice(&still_in_list, &obj->vma_list);

        return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
{
        drm_dma_handle_t *phys;
        int ret;

        if (obj->phys_handle) {
                if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
                        return -EBUSY;

                return 0;
        }

        if (obj->madv != I915_MADV_WILLNEED)
                return -EFAULT;

        if (obj->base.filp == NULL)
                return -EINVAL;

        ret = i915_gem_object_unbind(obj);
        if (ret)
                return ret;

        ret = i915_gem_object_put_pages(obj);
        if (ret)
                return ret;

        /* create a new object */
        phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
        if (!phys)
                return -ENOMEM;

        obj->phys_handle = phys;
        obj->ops = &i915_gem_phys_ops;

        return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
        int ret = 0;

        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
        ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;

        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;

                /* The physical object once assigned is fixed for the lifetime
                 * of the obj, so we can safely drop the lock and continue
                 * to access vaddr.
                 */
                mutex_unlock(&dev->struct_mutex);
                unwritten = copy_from_user(vaddr, user_data, args->size);
                mutex_lock(&dev->struct_mutex);
                if (unwritten) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(to_i915(dev));

out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        kmem_cache_free(dev_priv->objects, obj);
}

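/* Common creation helper shared by the dumb-buffer and GEM_CREATE ioctls:
 * round the requested size up to whole pages, allocate the object and
 * return a new userspace handle for it.
 */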
static int
i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_object_create(dev, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_create *args = data;

        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

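/* Copy helpers used when the object is bit-17 swizzled: data is moved in at
 * most cacheline-sized (64 byte) chunks, flipping bit 6 of the GPU offset
 * (gpu_offset ^ 64) so that the CPU copy matches the swizzled memory layout.
 */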
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                                    int *needs_clflush)
{
        int ret;

        *needs_clflush = 0;

        if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return -EINVAL;

        ret = i915_gem_object_wait_rendering(obj, true);
        if (ret)
                return ret;

        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourself into the gtt
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + shmem_page_offset,
                                      page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data,
                                              vaddr, shmem_page_offset,
                                              page_length);
        else
                ret = __copy_to_user(user_data,
                                     vaddr + shmem_page_offset,
                                     page_length);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

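/* Copy a single GGTT page to or from user space through a WC mapping of the
 * aperture. The copy may fault on the user pages, so callers drop
 * struct_mutex before using this helper.
 */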
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
                 uint64_t page_base, int page_offset,
                 char __user *user_data,
                 unsigned long length, bool pwrite)
{
        void __iomem *ioaddr;
        void *vaddr;
        uint64_t unwritten;

        ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force *)ioaddr + page_offset;
        if (pwrite)
                unwritten = __copy_from_user(vaddr, user_data, length);
        else
                unwritten = __copy_to_user(user_data, vaddr, length);

        io_mapping_unmap(ioaddr);
        return unwritten;
}

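/* Fallback pread path that reads through the GGTT aperture, used when the
 * object has no struct pages or the shmem path faults. The caller holds
 * struct_mutex and a runtime-pm wakeref, since this path touches the GGTT
 * and the mappable aperture.
 */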
static int
i915_gem_gtt_pread(struct drm_device *dev,
                   struct drm_i915_gem_object *obj, uint64_t size,
                   uint64_t data_offset, uint64_t data_ptr)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_mm_node node;
        char __user *user_data;
        uint64_t remain;
        uint64_t offset;
        int ret;

        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
        if (ret) {
                ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
                if (ret)
                        goto out;

                ret = i915_gem_object_get_pages(obj);
                if (ret) {
                        remove_mappable_node(&node);
                        goto out;
                }

                i915_gem_object_pin_pages(obj);
        } else {
                node.start = i915_gem_obj_ggtt_offset(obj);
                node.allocated = false;
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        goto out_unpin;
        }

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        if (ret)
                goto out_unpin;

        user_data = u64_to_user_ptr(data_ptr);
        remain = size;
        offset = data_offset;

        mutex_unlock(&dev->struct_mutex);
        if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_writeable(user_data, remain);
                if (ret) {
                        mutex_lock(&dev->struct_mutex);
                        goto out_unpin;
                }
        }

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb();
                        ggtt->base.insert_page(&ggtt->base,
                                               i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                               node.start,
                                               I915_CACHE_NONE, 0);
                        wmb();
                } else {
                        page_base += offset & PAGE_MASK;
                }
                /* This is a slow read/write as it tries to read from
                 * and write to user memory, which may result in page
                 * faults, and so we cannot perform this under struct_mutex.
                 */
                if (slow_user_access(ggtt->mappable, page_base,
                                     page_offset, user_data,
                                     page_length, false)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        mutex_lock(&dev->struct_mutex);
        if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
                /* The user has modified the object whilst we tried
                 * reading from it, and we now have no idea what domain
                 * the pages should be in. As we have just been touching
                 * them directly, flush everything back to the GTT
                 * domain.
                 */
                ret = i915_gem_object_set_to_gtt_domain(obj, false);
        }

out_unpin:
        if (node.allocated) {
                wmb();
                ggtt->base.clear_range(&ggtt->base,
                                       node.start, node.size,
                                       true);
                i915_gem_object_unpin_pages(obj);
                remove_mappable_node(&node);
        } else {
                i915_gem_object_ggtt_unpin(obj);
        }
out:
        return ret;
}

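/* Standard pread path for shmem-backed objects: copy out each backing page
 * via a kmap, trying the atomic fastpath first and falling back to a slow
 * copy (with struct_mutex dropped) if the user pages need faulting in.
 */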
static int
i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
{
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int prefaulted = 0;
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;

        if (!i915_gem_object_has_struct_page(obj))
                return -ENODEV;

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;

        offset = args->offset;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);
                if (ret == 0)
                        goto next_page;

                mutex_unlock(&dev->struct_mutex);

                if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                         * and just continue. */
                        (void)ret;
                        prefaulted = 1;
                }

                ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE,
                       u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check source.  */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_shmem_pread(dev, obj, args, file);

        /* pread for non shmem backed objects */
        if (ret == -EFAULT || ret == -ENODEV) {
                intel_runtime_pm_get(to_i915(dev));
                ret = i915_gem_gtt_pread(dev, obj, args->size,
                                         args->offset, args->data_ptr);
                intel_runtime_pm_put(to_i915(dev));
        }

out:
        i915_gem_object_put(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        void __iomem *vaddr_atomic;
        void *vaddr;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force *)vaddr_atomic + page_offset;
        unwritten = __copy_from_user_inatomic_nocache(vaddr,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_device *dev = obj->base.dev;
        struct drm_mm_node node;
        uint64_t remain, offset;
        char __user *user_data;
        int ret;
        bool hit_slow_path = false;

        if (obj->tiling_mode != I915_TILING_NONE)
                return -EFAULT;

        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret) {
                ret = insert_mappable_node(i915, &node, PAGE_SIZE);
                if (ret)
                        goto out;

                ret = i915_gem_object_get_pages(obj);
                if (ret) {
                        remove_mappable_node(&node);
                        goto out;
                }

                i915_gem_object_pin_pages(obj);
        } else {
                node.start = i915_gem_obj_ggtt_offset(obj);
                node.allocated = false;
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        goto out_unpin;
        }

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;

        intel_fb_obj_invalidate(obj, ORIGIN_GTT);
        obj->dirty = true;

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb(); /* flush the write before we modify the GGTT */
                        ggtt->base.insert_page(&ggtt->base,
                                               i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                               node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
                }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
                if (fast_user_write(ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        hit_slow_path = true;
                        mutex_unlock(&dev->struct_mutex);
                        if (slow_user_access(ggtt->mappable,
                                             page_base,
                                             page_offset, user_data,
                                             page_length, true)) {
                                ret = -EFAULT;
                                mutex_lock(&dev->struct_mutex);
                                goto out_flush;
                        }

                        mutex_lock(&dev->struct_mutex);
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out_flush:
        if (hit_slow_path) {
                if (ret == 0 &&
                    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
                        /* The user has modified the object whilst we tried
                         * reading from it, and we now have no idea what domain
                         * the pages should be in. As we have just been touching
                         * them directly, flush everything back to the GTT
                         * domain.
                         */
                        ret = i915_gem_object_set_to_gtt_domain(obj, false);
                }
        }

        intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
        if (node.allocated) {
                wmb();
                ggtt->base.clear_range(&ggtt->base,
                                       node.start, node.size,
                                       true);
                i915_gem_object_unpin_pages(obj);
                remove_mappable_node(&node);
        } else {
                i915_gem_object_ggtt_unpin(obj);
        }
out:
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
                                        user_data, page_length);
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                                                user_data,
                                                page_length);
        else
                ret = __copy_from_user(vaddr + shmem_page_offset,
                                       user_data,
                                       page_length);
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

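/* Standard pwrite path for shmem-backed objects: copy user data into each
 * backing page via a kmap, clflushing before and/or after the copy as needed
 * to keep the CPU caches coherent, and falling back to a slow copy with
 * struct_mutex dropped if the atomic fastpath faults.
 */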
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
                      struct drm_i915_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file)
{
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
        struct sg_page_iter sg_iter;

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;

        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                /* If we're not in the cpu write domain, set ourself into the gtt
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
                needs_clflush_before =
                        !cpu_cache_is_coherent(dev, obj->cache_level);

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        intel_fb_obj_invalidate(obj, ORIGIN_CPU);

        i915_gem_object_pin_pages(obj);

        offset = args->offset;
        obj->dirty = 1;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                /* If we don't overwrite a cacheline completely we need to be
                 * careful to have up-to-date data by first clflushing. Don't
                 * overcomplicate things and flush the entire page. */
                partial_cacheline_write = needs_clflush_before &&
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);
                if (ret == 0)
                        goto next_page;

                hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);
                ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        if (hit_slowpath) {
                /*
                 * Fixup: Flush cpu caches in case we didn't flush the dirty
                 * cachelines in-line while writing and the object moved
                 * out of the cpu write domain while we've dropped the lock.
                 */
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        if (i915_gem_clflush_object(obj, obj->pin_display))
                                needs_clflush_after = true;
                }
        }

        if (needs_clflush_after)
                i915_gem_chipset_flush(to_i915(dev));
        else
                obj->cache_dirty = true;

        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_READ,
                       u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
                        return -EFAULT;
        }

        intel_runtime_pm_get(dev_priv);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto put_rpm;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case. */
        }

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
                else if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
                else
                        ret = -ENODEV;
        }

out:
        i915_gem_object_put(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
put_rpm:
        intel_runtime_pm_put(dev_priv);

        return ret;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for read access or write
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
{
        struct reservation_object *resv;
        struct i915_gem_active *active;
        unsigned long active_mask;
        int idx, ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        if (!readonly) {
                active = obj->last_read;
                active_mask = obj->active;
        } else {
                active_mask = 1;
                active = &obj->last_write;
        }

        for_each_active(active_mask, idx) {
                ret = i915_gem_active_wait(&active[idx],
                                           &obj->base.dev->struct_mutex);
                if (ret)
                        return ret;
        }

        resv = i915_gem_object_get_dmabuf_resv(obj);
        if (resv) {
                long err;

                err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
                                                          MAX_SCHEDULE_TIMEOUT);
                if (err < 0)
                        return err;
        }

        return 0;
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            struct intel_rps_client *rps,
                                            bool readonly)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        struct i915_gem_active *active;
        unsigned long active_mask;
        int ret, i, n = 0;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);

        active_mask = obj->active;
        if (!active_mask)
                return 0;

        if (!readonly) {
                active = obj->last_read;
        } else {
                active_mask = 1;
                active = &obj->last_write;
        }

        for_each_active(active_mask, i) {
                struct drm_i915_gem_request *req;

                req = i915_gem_active_get(&active[i],
                                          &obj->base.dev->struct_mutex);
                if (req)
                        requests[n++] = req;
        }

        mutex_unlock(&dev->struct_mutex);
        ret = 0;
        for (i = 0; ret == 0 && i < n; i++)
                ret = i915_wait_request(requests[i], true, NULL, rps);
        mutex_lock(&dev->struct_mutex);

        for (i = 0; i < n; i++)
                i915_gem_request_put(requests[i]);

        return ret;
}

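/* Look up the per-file RPS client, used to apply a frequency boost while
 * waiting on requests on behalf of this file descriptor.
 */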
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
        struct drm_i915_file_private *fpriv = file->driver_priv;
        return &fpriv->rps;
}

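/* Classify the frontbuffer write origin: a write to the GTT domain counts as
 * ORIGIN_GTT unless the object is also mapped write-combined from the CPU,
 * in which case it is treated as a CPU write.
 */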
1441 static enum fb_op_origin
1442 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1443 {
1444         return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1445                ORIGIN_GTT : ORIGIN_CPU;
1446 }
1447
1448 /**
1449  * Called when user space prepares to use an object with the CPU, either
1450  * through the mmap ioctl's mapping or a GTT mapping.
1451  * @dev: drm device
1452  * @data: ioctl data blob
1453  * @file: drm file
1454  */
1455 int
1456 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1457                           struct drm_file *file)
1458 {
1459         struct drm_i915_gem_set_domain *args = data;
1460         struct drm_i915_gem_object *obj;
1461         uint32_t read_domains = args->read_domains;
1462         uint32_t write_domain = args->write_domain;
1463         int ret;
1464
1465         /* Only handle setting domains to types used by the CPU. */
1466         if (write_domain & I915_GEM_GPU_DOMAINS)
1467                 return -EINVAL;
1468
1469         if (read_domains & I915_GEM_GPU_DOMAINS)
1470                 return -EINVAL;
1471
1472         /* Having something in the write domain implies it's in the read
1473          * domain, and only that read domain.  Enforce that in the request.
1474          */
1475         if (write_domain != 0 && read_domains != write_domain)
1476                 return -EINVAL;
1477
1478         ret = i915_mutex_lock_interruptible(dev);
1479         if (ret)
1480                 return ret;
1481
1482         obj = i915_gem_object_lookup(file, args->handle);
1483         if (!obj) {
1484                 ret = -ENOENT;
1485                 goto unlock;
1486         }
1487
1488         /* Try to flush the object off the GPU without holding the lock.
1489          * We will repeat the flush holding the lock in the normal manner
1490          * to catch cases where we are gazumped.
1491          */
1492         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1493                                                           to_rps_client(file),
1494                                                           !write_domain);
1495         if (ret)
1496                 goto unref;
1497
1498         if (read_domains & I915_GEM_DOMAIN_GTT)
1499                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1500         else
1501                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1502
1503         if (write_domain != 0)
1504                 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1505
1506 unref:
1507         i915_gem_object_put(obj);
1508 unlock:
1509         mutex_unlock(&dev->struct_mutex);
1510         return ret;
1511 }
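/*
 * Illustration only, not driver code: a minimal userspace sketch of how this
 * ioctl is commonly used, e.g. to prepare a buffer for CPU writes through a
 * GTT mapping. drm_fd and handle are assumed to exist already; the uapi
 * definitions come from <drm/i915_drm.h> and the call uses plain ioctl(2).
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg))
 *		perror("set_domain");
 *
 * Note that setting a write domain implies the matching read domain, per the
 * checks at the top of the ioctl.
 */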
1512
1513 /**
1514  * Called when user space has done writes to this buffer
1515  * @dev: drm device
1516  * @data: ioctl data blob
1517  * @file: drm file
1518  */
1519 int
1520 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1521                          struct drm_file *file)
1522 {
1523         struct drm_i915_gem_sw_finish *args = data;
1524         struct drm_i915_gem_object *obj;
1525         int ret = 0;
1526
1527         ret = i915_mutex_lock_interruptible(dev);
1528         if (ret)
1529                 return ret;
1530
1531         obj = i915_gem_object_lookup(file, args->handle);
1532         if (!obj) {
1533                 ret = -ENOENT;
1534                 goto unlock;
1535         }
1536
1537         /* Pinned buffers may be scanout, so flush the cache */
1538         if (obj->pin_display)
1539                 i915_gem_object_flush_cpu_write_domain(obj);
1540
1541         i915_gem_object_put(obj);
1542 unlock:
1543         mutex_unlock(&dev->struct_mutex);
1544         return ret;
1545 }
1546
1547 /**
1548  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1549  *                       it is mapped to.
1550  * @dev: drm device
1551  * @data: ioctl data blob
1552  * @file: drm file
1553  *
1554  * While the mapping holds a reference on the contents of the object, it doesn't
1555  * imply a ref on the object itself.
1556  *
1557  * IMPORTANT:
1558  *
1559  * DRM driver writers who look at this function as an example of how to do GEM
1560  * mmap support, please don't implement mmap support like here. The modern way
1561  * to implement DRM mmap support is with an mmap offset ioctl (like
1562  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1563  * That way debug tooling like valgrind will understand what's going on; hiding
1564  * the mmap call in a driver-private ioctl breaks that. The i915 driver only
1565  * does cpu mmaps this way because we didn't know better.
1566  */
1567 int
1568 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1569                     struct drm_file *file)
1570 {
1571         struct drm_i915_gem_mmap *args = data;
1572         struct drm_i915_gem_object *obj;
1573         unsigned long addr;
1574
1575         if (args->flags & ~(I915_MMAP_WC))
1576                 return -EINVAL;
1577
1578         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1579                 return -ENODEV;
1580
1581         obj = i915_gem_object_lookup(file, args->handle);
1582         if (!obj)
1583                 return -ENOENT;
1584
1585         /* prime objects have no backing filp to GEM mmap
1586          * pages from.
1587          */
1588         if (!obj->base.filp) {
1589                 i915_gem_object_put_unlocked(obj);
1590                 return -EINVAL;
1591         }
1592
1593         addr = vm_mmap(obj->base.filp, 0, args->size,
1594                        PROT_READ | PROT_WRITE, MAP_SHARED,
1595                        args->offset);
1596         if (args->flags & I915_MMAP_WC) {
1597                 struct mm_struct *mm = current->mm;
1598                 struct vm_area_struct *vma;
1599
1600                 if (down_write_killable(&mm->mmap_sem)) {
1601                         i915_gem_object_put_unlocked(obj);
1602                         return -EINTR;
1603                 }
1604                 vma = find_vma(mm, addr);
1605                 if (vma)
1606                         vma->vm_page_prot =
1607                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1608                 else
1609                         addr = -ENOMEM;
1610                 up_write(&mm->mmap_sem);
1611
1612                 /* This may race, but that's ok, it only gets set */
1613                 WRITE_ONCE(obj->has_wc_mmap, true);
1614         }
1615         i915_gem_object_put_unlocked(obj);
1616         if (IS_ERR((void *)addr))
1617                 return addr;
1618
1619         args->addr_ptr = (uint64_t) addr;
1620
1621         return 0;
1622 }
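/*
 * Illustration only, not driver code: a minimal userspace sketch of the
 * legacy CPU mmap ioctl implemented above, assuming an existing drm_fd,
 * handle and size (uapi definitions from <drm/i915_drm.h>):
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	void *ptr = NULL;
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * As the comment above says, new userspace should prefer the mmap offset
 * ioctl plus mmap(2) on the DRM fd instead.
 */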
1623
1624 /**
1625  * i915_gem_fault - fault a page into the GTT
1626  * @vma: VMA in question
1627  * @vmf: fault info
1628  *
1629  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1630  * from userspace.  The fault handler takes care of binding the object to
1631  * the GTT (if needed), allocating and programming a fence register (again,
1632  * only if needed based on whether the old reg is still valid or the object
1633  * is tiled) and inserting a new PTE into the faulting process.
1634  *
1635  * Note that the faulting process may involve evicting existing objects
1636  * from the GTT and/or fence registers to make room.  So performance may
1637  * suffer if the GTT working set is large or there are few fence registers
1638  * left.
1639  */
1640 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1641 {
1642         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1643         struct drm_device *dev = obj->base.dev;
1644         struct drm_i915_private *dev_priv = to_i915(dev);
1645         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1646         struct i915_ggtt_view view = i915_ggtt_view_normal;
1647         pgoff_t page_offset;
1648         unsigned long pfn;
1649         int ret = 0;
1650         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1651
1652         intel_runtime_pm_get(dev_priv);
1653
1654         /* We don't use vmf->pgoff since that has the fake offset */
1655         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1656                 PAGE_SHIFT;
1657
1658         ret = i915_mutex_lock_interruptible(dev);
1659         if (ret)
1660                 goto out;
1661
1662         trace_i915_gem_object_fault(obj, page_offset, true, write);
1663
1664         /* Try to flush the object off the GPU first without holding the lock.
1665          * Upon reacquiring the lock, we will perform our sanity checks and then
1666          * repeat the flush holding the lock in the normal manner to catch cases
1667          * where we are gazumped.
1668          */
1669         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1670         if (ret)
1671                 goto unlock;
1672
1673         /* Access to snoopable pages through the GTT is incoherent. */
1674         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1675                 ret = -EFAULT;
1676                 goto unlock;
1677         }
1678
1679         /* Use a partial view if the object is bigger than the aperture. */
1680         if (obj->base.size >= ggtt->mappable_end &&
1681             obj->tiling_mode == I915_TILING_NONE) {
1682                 static const unsigned int chunk_size = 256; /* 1 MiB */
1683
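                /* Restrict the fault to a 256-page (1 MiB) chunk of the
                 * object around the faulting offset, so that untiled objects
                 * larger than the mappable aperture can still be accessed
                 * piecewise through the GTT.
                 */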
1684                 memset(&view, 0, sizeof(view));
1685                 view.type = I915_GGTT_VIEW_PARTIAL;
1686                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1687                 view.params.partial.size =
1688                         min_t(unsigned int,
1689                               chunk_size,
1690                               (vma->vm_end - vma->vm_start)/PAGE_SIZE -
1691                               view.params.partial.offset);
1692         }
1693
1694         /* Now pin it into the GTT if needed */
1695         ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
1696         if (ret)
1697                 goto unlock;
1698
1699         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1700         if (ret)
1701                 goto unpin;
1702
1703         ret = i915_gem_object_get_fence(obj);
1704         if (ret)
1705                 goto unpin;
1706
1707         /* Finally, remap it using the new GTT offset */
1708         pfn = ggtt->mappable_base +
1709                 i915_gem_obj_ggtt_offset_view(obj, &view);
1710         pfn >>= PAGE_SHIFT;
1711
1712         if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1713                 /* Overriding existing pages in partial view does not cause
1714                  * us any trouble as TLBs are still valid because the fault
1715                  * is due to userspace losing part of the mapping or never
1716                  * having accessed it before (at this partial's range).
1717                  */
1718                 unsigned long base = vma->vm_start +
1719                                      (view.params.partial.offset << PAGE_SHIFT);
1720                 unsigned int i;
1721
1722                 for (i = 0; i < view.params.partial.size; i++) {
1723                         ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
1724                         if (ret)
1725                                 break;
1726                 }
1727
1728                 obj->fault_mappable = true;
1729         } else {
1730                 if (!obj->fault_mappable) {
1731                         unsigned long size = min_t(unsigned long,
1732                                                    vma->vm_end - vma->vm_start,
1733                                                    obj->base.size);
1734                         int i;
1735
1736                         for (i = 0; i < size >> PAGE_SHIFT; i++) {
1737                                 ret = vm_insert_pfn(vma,
1738                                                     (unsigned long)vma->vm_start + i * PAGE_SIZE,
1739                                                     pfn + i);
1740                                 if (ret)
1741                                         break;
1742                         }
1743
1744                         obj->fault_mappable = true;
1745                 } else
1746                         ret = vm_insert_pfn(vma,
1747                                             (unsigned long)vmf->virtual_address,
1748                                             pfn + page_offset);
1749         }
1750 unpin:
1751         i915_gem_object_ggtt_unpin_view(obj, &view);
1752 unlock:
1753         mutex_unlock(&dev->struct_mutex);
1754 out:
1755         switch (ret) {
1756         case -EIO:
1757                 /*
1758                  * We eat errors when the gpu is terminally wedged to avoid
1759                  * userspace unduly crashing (gl has no provisions for mmaps to
1760                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1761                  * and so needs to be reported.
1762                  */
1763                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1764                         ret = VM_FAULT_SIGBUS;
1765                         break;
1766                 }
1767         case -EAGAIN:
1768                 /*
1769                  * EAGAIN means the gpu is hung and we'll wait for the error
1770                  * handler to reset everything when re-faulting in
1771                  * i915_mutex_lock_interruptible.
1772                  */
1773         case 0:
1774         case -ERESTARTSYS:
1775         case -EINTR:
1776         case -EBUSY:
1777                 /*
1778                  * EBUSY is ok: this just means that another thread
1779                  * already did the job.
1780                  */
1781                 ret = VM_FAULT_NOPAGE;
1782                 break;
1783         case -ENOMEM:
1784                 ret = VM_FAULT_OOM;
1785                 break;
1786         case -ENOSPC:
1787         case -EFAULT:
1788                 ret = VM_FAULT_SIGBUS;
1789                 break;
1790         default:
1791                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1792                 ret = VM_FAULT_SIGBUS;
1793                 break;
1794         }
1795
1796         intel_runtime_pm_put(dev_priv);
1797         return ret;
1798 }
1799
1800 /**
1801  * i915_gem_release_mmap - remove physical page mappings
1802  * @obj: obj in question
1803  *
1804  * Preserve the reservation of the mmapping with the DRM core code, but
1805  * relinquish ownership of the pages back to the system.
1806  *
1807  * It is vital that we remove the page mapping if we have mapped a tiled
1808  * object through the GTT and then lose the fence register due to
1809  * resource pressure. Similarly if the object has been moved out of the
1810  * aperture, then pages mapped into userspace must be revoked. Removing the
1811  * mapping will then trigger a page fault on the next user access, allowing
1812  * fixup by i915_gem_fault().
1813  */
1814 void
1815 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1816 {
1817         /* Serialisation between user GTT access and our code depends upon
1818          * revoking the CPU's PTE whilst the mutex is held. The next user
1819          * pagefault then has to wait until we release the mutex.
1820          */
1821         lockdep_assert_held(&obj->base.dev->struct_mutex);
1822
1823         if (!obj->fault_mappable)
1824                 return;
1825
1826         drm_vma_node_unmap(&obj->base.vma_node,
1827                            obj->base.dev->anon_inode->i_mapping);
1828
1829         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
1830          * memory transactions from userspace before we return. The TLB
1831          * flushing implied by changing the PTE above *should* be
1832          * sufficient; an extra barrier here just provides us with a bit
1833          * of paranoid documentation about our requirement to serialise
1834          * memory writes before touching registers / GSM.
1835          */
1836         wmb();
1837
1838         obj->fault_mappable = false;
1839 }
1840
1841 void
1842 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1843 {
1844         struct drm_i915_gem_object *obj;
1845
1846         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1847                 i915_gem_release_mmap(obj);
1848 }
1849
1850 uint32_t
1851 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1852 {
1853         uint32_t gtt_size;
1854
1855         if (INTEL_INFO(dev)->gen >= 4 ||
1856             tiling_mode == I915_TILING_NONE)
1857                 return size;
1858
1859         /* Previous chips need a power-of-two fence region when tiling */
1860         if (IS_GEN3(dev))
1861                 gtt_size = 1024*1024;
1862         else
1863                 gtt_size = 512*1024;
1864
1865         while (gtt_size < size)
1866                 gtt_size <<= 1;
1867
1868         return gtt_size;
1869 }
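/*
 * Worked example (illustrative numbers only): a 300 KiB tiled object needs a
 * 512 KiB fence region on gen2 and a 1 MiB region on gen3, while a 1.5 MiB
 * tiled object rounds up to 2 MiB on both; gen4+ uses the object size as-is.
 */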
1870
1871 /**
1872  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1873  * @dev: drm device
1874  * @size: object size
1875  * @tiling_mode: tiling mode
1876  * @fenced: is fenced alignment required or not
1877  *
1878  * Return the required GTT alignment for an object, taking into account
1879  * potential fence register mapping.
1880  */
1881 uint32_t
1882 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1883                            int tiling_mode, bool fenced)
1884 {
1885         /*
1886          * Minimum alignment is 4k (GTT page size), but might be greater
1887          * if a fence register is needed for the object.
1888          */
1889         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1890             tiling_mode == I915_TILING_NONE)
1891                 return 4096;
1892
1893         /*
1894          * Previous chips need to be aligned to the size of the smallest
1895          * fence register that can contain the object.
1896          */
1897         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1898 }
1899
1900 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1901 {
1902         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1903         int ret;
1904
1905         dev_priv->mm.shrinker_no_lock_stealing = true;
1906
1907         ret = drm_gem_create_mmap_offset(&obj->base);
1908         if (ret != -ENOSPC)
1909                 goto out;
1910
1911         /* Badly fragmented mmap space? The only way we can recover
1912          * space is by destroying unwanted objects. We can't randomly release
1913          * mmap_offsets as userspace expects them to be persistent for the
1914          * lifetime of the objects. The closest we can do is to release the
1915          * offsets on purgeable objects by truncating it and marking it purged,
1916          * which prevents userspace from ever using that object again.
1917          */
1918         i915_gem_shrink(dev_priv,
1919                         obj->base.size >> PAGE_SHIFT,
1920                         I915_SHRINK_BOUND |
1921                         I915_SHRINK_UNBOUND |
1922                         I915_SHRINK_PURGEABLE);
1923         ret = drm_gem_create_mmap_offset(&obj->base);
1924         if (ret != -ENOSPC)
1925                 goto out;
1926
1927         i915_gem_shrink_all(dev_priv);
1928         ret = drm_gem_create_mmap_offset(&obj->base);
1929 out:
1930         dev_priv->mm.shrinker_no_lock_stealing = false;
1931
1932         return ret;
1933 }
1934
1935 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1936 {
1937         drm_gem_free_mmap_offset(&obj->base);
1938 }
1939
1940 int
1941 i915_gem_mmap_gtt(struct drm_file *file,
1942                   struct drm_device *dev,
1943                   uint32_t handle,
1944                   uint64_t *offset)
1945 {
1946         struct drm_i915_gem_object *obj;
1947         int ret;
1948
1949         ret = i915_mutex_lock_interruptible(dev);
1950         if (ret)
1951                 return ret;
1952
1953         obj = i915_gem_object_lookup(file, handle);
1954         if (!obj) {
1955                 ret = -ENOENT;
1956                 goto unlock;
1957         }
1958
1959         if (obj->madv != I915_MADV_WILLNEED) {
1960                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1961                 ret = -EFAULT;
1962                 goto out;
1963         }
1964
1965         ret = i915_gem_object_create_mmap_offset(obj);
1966         if (ret)
1967                 goto out;
1968
1969         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1970
1971 out:
1972         i915_gem_object_put(obj);
1973 unlock:
1974         mutex_unlock(&dev->struct_mutex);
1975         return ret;
1976 }
1977
1978 /**
1979  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1980  * @dev: DRM device
1981  * @data: GTT mapping ioctl data
1982  * @file: GEM object info
1983  *
1984  * Simply returns the fake offset to userspace so it can mmap it.
1985  * The mmap call will end up in drm_gem_mmap(), which will set things
1986  * up so we can get faults in the handler above.
1987  *
1988  * The fault handler will take care of binding the object into the GTT
1989  * (since it may have been evicted to make room for something), allocating
1990  * a fence register, and mapping the appropriate aperture address into
1991  * userspace.
1992  */
1993 int
1994 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1995                         struct drm_file *file)
1996 {
1997         struct drm_i915_gem_mmap_gtt *args = data;
1998
1999         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2000 }
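/*
 * Illustration only, not driver code: the userspace half of the GTT mmap
 * flow, assuming an existing drm_fd, handle and size:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   drm_fd, arg.offset);
 *
 * The returned offset is the fake offset consumed by drm_gem_mmap(), which
 * routes subsequent faults into i915_gem_fault() above.
 */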
2001
2002 /* Immediately discard the backing storage */
2003 static void
2004 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2005 {
2006         i915_gem_object_free_mmap_offset(obj);
2007
2008         if (obj->base.filp == NULL)
2009                 return;
2010
2011         /* Our goal here is to return as much of the memory back to the
2012          * system as possible, since we are called from OOM.
2013          * To do this we must instruct the shmfs to drop all of its
2014          * backing pages, *now*.
2015          */
2016         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2017         obj->madv = __I915_MADV_PURGED;
2018 }
2019
2020 /* Try to discard unwanted pages */
2021 static void
2022 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2023 {
2024         struct address_space *mapping;
2025
2026         switch (obj->madv) {
2027         case I915_MADV_DONTNEED:
2028                 i915_gem_object_truncate(obj);
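                /* fall through */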
2029         case __I915_MADV_PURGED:
2030                 return;
2031         }
2032
2033         if (obj->base.filp == NULL)
2034                 return;
2035
2036         mapping = file_inode(obj->base.filp)->i_mapping;
2037         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2038 }
2039
2040 static void
2041 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2042 {
2043         struct sgt_iter sgt_iter;
2044         struct page *page;
2045         int ret;
2046
2047         BUG_ON(obj->madv == __I915_MADV_PURGED);
2048
2049         ret = i915_gem_object_set_to_cpu_domain(obj, true);
2050         if (WARN_ON(ret)) {
2051                 /* In the event of a disaster, abandon all caches and
2052                  * hope for the best.
2053                  */
2054                 i915_gem_clflush_object(obj, true);
2055                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2056         }
2057
2058         i915_gem_gtt_finish_object(obj);
2059
2060         if (i915_gem_object_needs_bit17_swizzle(obj))
2061                 i915_gem_object_save_bit_17_swizzle(obj);
2062
2063         if (obj->madv == I915_MADV_DONTNEED)
2064                 obj->dirty = 0;
2065
2066         for_each_sgt_page(page, sgt_iter, obj->pages) {
2067                 if (obj->dirty)
2068                         set_page_dirty(page);
2069
2070                 if (obj->madv == I915_MADV_WILLNEED)
2071                         mark_page_accessed(page);
2072
2073                 put_page(page);
2074         }
2075         obj->dirty = 0;
2076
2077         sg_free_table(obj->pages);
2078         kfree(obj->pages);
2079 }
2080
2081 int
2082 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2083 {
2084         const struct drm_i915_gem_object_ops *ops = obj->ops;
2085
2086         if (obj->pages == NULL)
2087                 return 0;
2088
2089         if (obj->pages_pin_count)
2090                 return -EBUSY;
2091
2092         GEM_BUG_ON(obj->bind_count);
2093
2094         /* ->put_pages might need to allocate memory for the bit17 swizzle
2095          * array, hence protect them from being reaped by removing them from gtt
2096          * lists early. */
2097         list_del(&obj->global_list);
2098
2099         if (obj->mapping) {
2100                 if (is_vmalloc_addr(obj->mapping))
2101                         vunmap(obj->mapping);
2102                 else
2103                         kunmap(kmap_to_page(obj->mapping));
2104                 obj->mapping = NULL;
2105         }
2106
2107         ops->put_pages(obj);
2108         obj->pages = NULL;
2109
2110         i915_gem_object_invalidate(obj);
2111
2112         return 0;
2113 }
2114
2115 static int
2116 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2117 {
2118         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2119         int page_count, i;
2120         struct address_space *mapping;
2121         struct sg_table *st;
2122         struct scatterlist *sg;
2123         struct sgt_iter sgt_iter;
2124         struct page *page;
2125         unsigned long last_pfn = 0;     /* suppress gcc warning */
2126         int ret;
2127         gfp_t gfp;
2128
2129         /* Assert that the object is not currently in any GPU domain. As it
2130          * wasn't in the GTT, there shouldn't be any way it could have been in
2131          * a GPU cache
2132          */
2133         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2134         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2135
2136         st = kmalloc(sizeof(*st), GFP_KERNEL);
2137         if (st == NULL)
2138                 return -ENOMEM;
2139
2140         page_count = obj->base.size / PAGE_SIZE;
2141         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2142                 kfree(st);
2143                 return -ENOMEM;
2144         }
2145
2146         /* Get the list of pages out of our struct file.  They'll be pinned
2147          * at this point until we release them.
2148          *
2149          * Fail silently without starting the shrinker
2150          */
2151         mapping = file_inode(obj->base.filp)->i_mapping;
2152         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2153         gfp |= __GFP_NORETRY | __GFP_NOWARN;
2154         sg = st->sgl;
2155         st->nents = 0;
2156         for (i = 0; i < page_count; i++) {
2157                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2158                 if (IS_ERR(page)) {
2159                         i915_gem_shrink(dev_priv,
2160                                         page_count,
2161                                         I915_SHRINK_BOUND |
2162                                         I915_SHRINK_UNBOUND |
2163                                         I915_SHRINK_PURGEABLE);
2164                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2165                 }
2166                 if (IS_ERR(page)) {
2167                         /* We've tried hard to allocate the memory by reaping
2168                          * our own buffer, now let the real VM do its job and
2169                          * go down in flames if truly OOM.
2170                          */
2171                         i915_gem_shrink_all(dev_priv);
2172                         page = shmem_read_mapping_page(mapping, i);
2173                         if (IS_ERR(page)) {
2174                                 ret = PTR_ERR(page);
2175                                 goto err_pages;
2176                         }
2177                 }
2178 #ifdef CONFIG_SWIOTLB
2179                 if (swiotlb_nr_tbl()) {
2180                         st->nents++;
2181                         sg_set_page(sg, page, PAGE_SIZE, 0);
2182                         sg = sg_next(sg);
2183                         continue;
2184                 }
2185 #endif
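                /* Coalesce physically contiguous pages into a single sg
                 * entry; a new entry is started for the first page and
                 * whenever the pfn run is broken.
                 */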
2186                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2187                         if (i)
2188                                 sg = sg_next(sg);
2189                         st->nents++;
2190                         sg_set_page(sg, page, PAGE_SIZE, 0);
2191                 } else {
2192                         sg->length += PAGE_SIZE;
2193                 }
2194                 last_pfn = page_to_pfn(page);
2195
2196                 /* Check that the i965g/gm workaround works. */
2197                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2198         }
2199 #ifdef CONFIG_SWIOTLB
2200         if (!swiotlb_nr_tbl())
2201 #endif
2202                 sg_mark_end(sg);
2203         obj->pages = st;
2204
2205         ret = i915_gem_gtt_prepare_object(obj);
2206         if (ret)
2207                 goto err_pages;
2208
2209         if (i915_gem_object_needs_bit17_swizzle(obj))
2210                 i915_gem_object_do_bit_17_swizzle(obj);
2211
2212         if (obj->tiling_mode != I915_TILING_NONE &&
2213             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2214                 i915_gem_object_pin_pages(obj);
2215
2216         return 0;
2217
2218 err_pages:
2219         sg_mark_end(sg);
2220         for_each_sgt_page(page, sgt_iter, st)
2221                 put_page(page);
2222         sg_free_table(st);
2223         kfree(st);
2224
2225         /* shmemfs first checks if there is enough memory to allocate the page
2226          * and reports ENOSPC should there be insufficient memory, along with the usual
2227          * ENOMEM for a genuine allocation failure.
2228          *
2229          * We use ENOSPC in our driver to mean that we have run out of aperture
2230          * space and so want to translate the error from shmemfs back to our
2231          * usual understanding of ENOMEM.
2232          */
2233         if (ret == -ENOSPC)
2234                 ret = -ENOMEM;
2235
2236         return ret;
2237 }
2238
2239 /* Ensure that the associated pages are gathered from the backing storage
2240  * and pinned into our object. i915_gem_object_get_pages() may be called
2241  * multiple times before they are released by a single call to
2242  * i915_gem_object_put_pages() - once the pages are no longer referenced
2243  * either as a result of memory pressure (reaping pages under the shrinker)
2244  * or as the object is itself released.
2245  */
2246 int
2247 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2248 {
2249         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2250         const struct drm_i915_gem_object_ops *ops = obj->ops;
2251         int ret;
2252
2253         if (obj->pages)
2254                 return 0;
2255
2256         if (obj->madv != I915_MADV_WILLNEED) {
2257                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2258                 return -EFAULT;
2259         }
2260
2261         BUG_ON(obj->pages_pin_count);
2262
2263         ret = ops->get_pages(obj);
2264         if (ret)
2265                 return ret;
2266
2267         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2268
2269         obj->get_page.sg = obj->pages->sgl;
2270         obj->get_page.last = 0;
2271
2272         return 0;
2273 }
2274
2275 /* The 'mapping' part of i915_gem_object_pin_map() below */
2276 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2277 {
2278         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2279         struct sg_table *sgt = obj->pages;
2280         struct sgt_iter sgt_iter;
2281         struct page *page;
2282         struct page *stack_pages[32];
2283         struct page **pages = stack_pages;
2284         unsigned long i = 0;
2285         void *addr;
2286
2287         /* A single page can always be kmapped */
2288         if (n_pages == 1)
2289                 return kmap(sg_page(sgt->sgl));
2290
2291         if (n_pages > ARRAY_SIZE(stack_pages)) {
2292                 /* Too big for stack -- allocate temporary array instead */
2293                 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2294                 if (!pages)
2295                         return NULL;
2296         }
2297
2298         for_each_sgt_page(page, sgt_iter, sgt)
2299                 pages[i++] = page;
2300
2301         /* Check that we have the expected number of pages */
2302         GEM_BUG_ON(i != n_pages);
2303
2304         addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2305
2306         if (pages != stack_pages)
2307                 drm_free_large(pages);
2308
2309         return addr;
2310 }
2311
2312 /* get, pin, and map the pages of the object into kernel space */
2313 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2314 {
2315         int ret;
2316
2317         lockdep_assert_held(&obj->base.dev->struct_mutex);
2318
2319         ret = i915_gem_object_get_pages(obj);
2320         if (ret)
2321                 return ERR_PTR(ret);
2322
2323         i915_gem_object_pin_pages(obj);
2324
2325         if (!obj->mapping) {
2326                 obj->mapping = i915_gem_object_map(obj);
2327                 if (!obj->mapping) {
2328                         i915_gem_object_unpin_pages(obj);
2329                         return ERR_PTR(-ENOMEM);
2330                 }
2331         }
2332
2333         return obj->mapping;
2334 }
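/*
 * Note: the pointer returned above remains valid for as long as the pages
 * stay pinned; callers drop that pin again once they are done with the
 * mapping. The mapping itself is cached on the object and reused, and is
 * only torn down when the pages are finally released.
 */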
2335
2336 static void
2337 i915_gem_object_retire__write(struct i915_gem_active *active,
2338                               struct drm_i915_gem_request *request)
2339 {
2340         struct drm_i915_gem_object *obj =
2341                 container_of(active, struct drm_i915_gem_object, last_write);
2342
2343         intel_fb_obj_flush(obj, true, ORIGIN_CS);
2344 }
2345
2346 static void
2347 i915_gem_object_retire__read(struct i915_gem_active *active,
2348                              struct drm_i915_gem_request *request)
2349 {
2350         int idx = request->engine->id;
2351         struct drm_i915_gem_object *obj =
2352                 container_of(active, struct drm_i915_gem_object, last_read[idx]);
2353
2354         GEM_BUG_ON((obj->active & (1 << idx)) == 0);
2355
2356         obj->active &= ~(1 << idx);
2357         if (obj->active)
2358                 return;
2359
2360         /* Bump our place on the bound list to keep it roughly in LRU order
2361          * so that we don't steal from recently used but inactive objects
2362          * (unless we are forced to, of course!)
2363          */
2364         if (obj->bind_count)
2365                 list_move_tail(&obj->global_list,
2366                                &request->i915->mm.bound_list);
2367
2368         i915_gem_object_put(obj);
2369 }
2370
2371 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2372 {
2373         unsigned long elapsed;
2374
2375         if (ctx->hang_stats.banned)
2376                 return true;
2377
2378         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2379         if (ctx->hang_stats.ban_period_seconds &&
2380             elapsed <= ctx->hang_stats.ban_period_seconds) {
2381                 DRM_DEBUG("context hanging too fast, banning!\n");
2382                 return true;
2383         }
2384
2385         return false;
2386 }
2387
2388 static void i915_set_reset_status(struct i915_gem_context *ctx,
2389                                   const bool guilty)
2390 {
2391         struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2392
2393         if (guilty) {
2394                 hs->banned = i915_context_is_banned(ctx);
2395                 hs->batch_active++;
2396                 hs->guilty_ts = get_seconds();
2397         } else {
2398                 hs->batch_pending++;
2399         }
2400 }
2401
2402 struct drm_i915_gem_request *
2403 i915_gem_find_active_request(struct intel_engine_cs *engine)
2404 {
2405         struct drm_i915_gem_request *request;
2406
2407         /* We are called by the error capture and reset at a random
2408          * point in time. In particular, note that neither is crucially
2409          * ordered with an interrupt. After a hang, the GPU is dead and we
2410          * assume that no more writes can happen (we waited long enough for
2411          * all writes that were in transaction to be flushed) - adding an
2412          * extra delay for a recent interrupt is pointless. Hence, we do
2413          * not need an engine->irq_seqno_barrier() before the seqno reads.
2414          */
2415         list_for_each_entry(request, &engine->request_list, link) {
2416                 if (i915_gem_request_completed(request))
2417                         continue;
2418
2419                 return request;
2420         }
2421
2422         return NULL;
2423 }
2424
2425 static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
2426 {
2427         struct drm_i915_gem_request *request;
2428         bool ring_hung;
2429
2430         request = i915_gem_find_active_request(engine);
2431         if (request == NULL)
2432                 return;
2433
2434         ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2435
2436         i915_set_reset_status(request->ctx, ring_hung);
2437         list_for_each_entry_continue(request, &engine->request_list, link)
2438                 i915_set_reset_status(request->ctx, false);
2439 }
2440
2441 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2442 {
2443         struct intel_ring *ring;
2444
2445         /* Mark all pending requests as complete so that any concurrent
2446          * (lockless) lookup doesn't try and wait upon the request as we
2447          * reset it.
2448          */
2449         intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2450
2451         /*
2452          * Clear the execlists queue up before freeing the requests, as those
2453          * are the ones that keep the context and ringbuffer backing objects
2454          * pinned in place.
2455          */
2456
2457         if (i915.enable_execlists) {
2458                 /* Ensure irq handler finishes or is cancelled. */
2459                 tasklet_kill(&engine->irq_tasklet);
2460
2461                 intel_execlists_cancel_requests(engine);
2462         }
2463
2464         /*
2465          * We must free the requests after all the corresponding objects have
2466          * been moved off active lists. Which is the same order as the normal
2467  * retire_requests function does. This is important if objects hold
2468  * implicit references on things such as ppgtt address spaces through
2469          * the request.
2470          */
2471         if (!list_empty(&engine->request_list)) {
2472                 struct drm_i915_gem_request *request;
2473
2474                 request = list_last_entry(&engine->request_list,
2475                                           struct drm_i915_gem_request,
2476                                           link);
2477
2478                 i915_gem_request_retire_upto(request);
2479         }
2480
2481         /* Having flushed all requests from all queues, we know that all
2482          * ringbuffers must now be empty. However, since we do not reclaim
2483          * all space when retiring the request (to prevent HEADs colliding
2484          * with rapid ringbuffer wraparound) the amount of available space
2485          * upon reset is less than when we start. Do one more pass over
2486          * all the ringbuffers to reset last_retired_head.
2487          */
2488         list_for_each_entry(ring, &engine->buffers, link) {
2489                 ring->last_retired_head = ring->tail;
2490                 intel_ring_update_space(ring);
2491         }
2492
2493         engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2494 }
2495
2496 void i915_gem_reset(struct drm_device *dev)
2497 {
2498         struct drm_i915_private *dev_priv = to_i915(dev);
2499         struct intel_engine_cs *engine;
2500
2501         /*
2502          * Before we free the objects from the requests, we need to inspect
2503          * them for finding the guilty party. As the requests only borrow
2504          * their reference to the objects, the inspection must be done first.
2505          */
2506         for_each_engine(engine, dev_priv)
2507                 i915_gem_reset_engine_status(engine);
2508
2509         for_each_engine(engine, dev_priv)
2510                 i915_gem_reset_engine_cleanup(engine);
2511         mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2512
2513         i915_gem_context_reset(dev);
2514
2515         i915_gem_restore_fences(dev);
2516 }
2517
2518 static void
2519 i915_gem_retire_work_handler(struct work_struct *work)
2520 {
2521         struct drm_i915_private *dev_priv =
2522                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2523         struct drm_device *dev = &dev_priv->drm;
2524
2525         /* Come back later if the device is busy... */
2526         if (mutex_trylock(&dev->struct_mutex)) {
2527                 i915_gem_retire_requests(dev_priv);
2528                 mutex_unlock(&dev->struct_mutex);
2529         }
2530
2531         /* Keep the retire handler running until we are finally idle.
2532          * We do not need to do this test under locking as in the worst-case
2533          * we queue the retire worker once too often.
2534          */
2535         if (READ_ONCE(dev_priv->gt.awake)) {
2536                 i915_queue_hangcheck(dev_priv);
2537                 queue_delayed_work(dev_priv->wq,
2538                                    &dev_priv->gt.retire_work,
2539                                    round_jiffies_up_relative(HZ));
2540         }
2541 }
2542
2543 static void
2544 i915_gem_idle_work_handler(struct work_struct *work)
2545 {
2546         struct drm_i915_private *dev_priv =
2547                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2548         struct drm_device *dev = &dev_priv->drm;
2549         struct intel_engine_cs *engine;
2550         unsigned int stuck_engines;
2551         bool rearm_hangcheck;
2552
2553         if (!READ_ONCE(dev_priv->gt.awake))
2554                 return;
2555
2556         if (READ_ONCE(dev_priv->gt.active_engines))
2557                 return;
2558
2559         rearm_hangcheck =
2560                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2561
2562         if (!mutex_trylock(&dev->struct_mutex)) {
2563                 /* Currently busy, come back later */
2564                 mod_delayed_work(dev_priv->wq,
2565                                  &dev_priv->gt.idle_work,
2566                                  msecs_to_jiffies(50));
2567                 goto out_rearm;
2568         }
2569
2570         if (dev_priv->gt.active_engines)
2571                 goto out_unlock;
2572
2573         for_each_engine(engine, dev_priv)
2574                 i915_gem_batch_pool_fini(&engine->batch_pool);
2575
2576         GEM_BUG_ON(!dev_priv->gt.awake);
2577         dev_priv->gt.awake = false;
2578         rearm_hangcheck = false;
2579
2580         /* As we have disabled hangcheck, we need to unstick any waiters still
2581          * hanging around. However, as we may be racing against the interrupt
2582          * handler or the waiters themselves, we skip enabling the fake-irq.
2583          */
2584         stuck_engines = intel_kick_waiters(dev_priv);
2585         if (unlikely(stuck_engines))
2586                 DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
2587                                  stuck_engines);
2588
2589         if (INTEL_GEN(dev_priv) >= 6)
2590                 gen6_rps_idle(dev_priv);
2591         intel_runtime_pm_put(dev_priv);
2592 out_unlock:
2593         mutex_unlock(&dev->struct_mutex);
2594
2595 out_rearm:
2596         if (rearm_hangcheck) {
2597                 GEM_BUG_ON(!dev_priv->gt.awake);
2598                 i915_queue_hangcheck(dev_priv);
2599         }
2600 }
2601
2602 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2603 {
2604         struct drm_i915_gem_object *obj = to_intel_bo(gem);
2605         struct drm_i915_file_private *fpriv = file->driver_priv;
2606         struct i915_vma *vma, *vn;
2607
2608         mutex_lock(&obj->base.dev->struct_mutex);
2609         list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2610                 if (vma->vm->file == fpriv)
2611                         i915_vma_close(vma);
2612         mutex_unlock(&obj->base.dev->struct_mutex);
2613 }
2614
2615 /**
2616  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2617  * @dev: drm device pointer
2618  * @data: ioctl data blob
2619  * @file: drm file pointer
2620  *
2621  * Returns 0 if successful, else an error is returned with the remaining time in
2622  * the timeout parameter.
2623  *  -ETIME: object is still busy after timeout
2624  *  -ERESTARTSYS: signal interrupted the wait
2625  *  -ENOENT: object doesn't exist
2626  * Also possible, but rare:
2627  *  -EAGAIN: GPU wedged
2628  *  -ENOMEM: damn
2629  *  -ENODEV: Internal IRQ fail
2630  *  -E?: The add request failed
2631  *
2632  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2633  * non-zero timeout parameter the wait ioctl will wait for the given number of
2634  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2635  * without holding struct_mutex the object may become re-busied before this
2636  * function completes. A similar but shorter race condition exists in the busy
2637  * ioctl.
2638  */
2639 int
2640 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2641 {
2642         struct drm_i915_gem_wait *args = data;
2643         struct drm_i915_gem_object *obj;
2644         struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
2645         int i, n = 0;
2646         int ret;
2647
2648         if (args->flags != 0)
2649                 return -EINVAL;
2650
2651         ret = i915_mutex_lock_interruptible(dev);
2652         if (ret)
2653                 return ret;
2654
2655         obj = i915_gem_object_lookup(file, args->bo_handle);
2656         if (!obj) {
2657                 mutex_unlock(&dev->struct_mutex);
2658                 return -ENOENT;
2659         }
2660
2661         if (!obj->active)
2662                 goto out;
2663
2664         for (i = 0; i < I915_NUM_ENGINES; i++) {
2665                 struct drm_i915_gem_request *req;
2666
2667                 req = i915_gem_active_get(&obj->last_read[i],
2668                                           &obj->base.dev->struct_mutex);
2669                 if (req)
2670                         requests[n++] = req;
2671         }
2672
2673 out:
2674         i915_gem_object_put(obj);
2675         mutex_unlock(&dev->struct_mutex);
2676
2677         for (i = 0; i < n; i++) {
2678                 if (ret == 0)
2679                         ret = i915_wait_request(requests[i], true,
2680                                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2681                                                 to_rps_client(file));
2682                 i915_gem_request_put(requests[i]);
2683         }
2684         return ret;
2685 }
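/*
 * Illustration only, not driver code: a userspace sketch of this ioctl,
 * assuming an existing drm_fd and handle, waiting for up to one second:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000ll,
 *	};
 *	int ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * ret == 0 means the object went idle, with any remaining time written back
 * to timeout_ns; -1 with errno set to ETIME means it is still busy. As noted
 * above, a timeout of 0 behaves like the busy ioctl.
 */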
2686
2687 static int
2688 __i915_gem_object_sync(struct drm_i915_gem_request *to,
2689                        struct drm_i915_gem_request *from)
2690 {
2691         int ret;
2692
2693         if (to->engine == from->engine)
2694                 return 0;
2695
2696         if (!i915.semaphores) {
2697                 ret = i915_wait_request(from,
2698                                         from->i915->mm.interruptible,
2699                                         NULL,
2700                                         NO_WAITBOOST);
2701                 if (ret)
2702                         return ret;
2703         } else {
2704                 int idx = intel_engine_sync_index(from->engine, to->engine);
2705                 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
2706                         return 0;
2707
2708                 trace_i915_gem_ring_sync_to(to, from);
2709                 ret = to->engine->semaphore.sync_to(to, from);
2710                 if (ret)
2711                         return ret;
2712
2713                 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
2714         }
2715
2716         return 0;
2717 }
2718
2719 /**
2720  * i915_gem_object_sync - sync an object to a ring.
2721  *
2722  * @obj: object which may be in use on another ring.
2723  * @to: request we are wishing to use
2724  *
2725  * This code is meant to abstract object synchronization with the GPU.
2726  * Conceptually we serialise writes between engines inside the GPU.
2727  * We only allow one engine to write into a buffer at any time, but
2728  * multiple readers. To ensure each has a coherent view of memory, we must:
2729  *
2730  * - If there is an outstanding write request to the object, the new
2731  *   request must wait for it to complete (either CPU or in hw, requests
2732  *   on the same ring will be naturally ordered).
2733  *
2734  * - If we are a write request (pending_write_domain is set), the new
2735  *   request must wait for outstanding read requests to complete.
2736  *
2737  * Returns 0 if successful, else propagates up the lower layer error.
2738  */
2739 int
2740 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2741                      struct drm_i915_gem_request *to)
2742 {
2743         struct i915_gem_active *active;
2744         unsigned long active_mask;
2745         int idx;
2746
2747         lockdep_assert_held(&obj->base.dev->struct_mutex);
2748
2749         active_mask = obj->active;
2750         if (!active_mask)
2751                 return 0;
2752
2753         if (obj->base.pending_write_domain) {
2754                 active = obj->last_read;
2755         } else {
2756                 active_mask = 1;
2757                 active = &obj->last_write;
2758         }
2759
2760         for_each_active(active_mask, idx) {
2761                 struct drm_i915_gem_request *request;
2762                 int ret;
2763
2764                 request = i915_gem_active_peek(&active[idx],
2765                                                &obj->base.dev->struct_mutex);
2766                 if (!request)
2767                         continue;
2768
2769                 ret = __i915_gem_object_sync(to, request);
2770                 if (ret)
2771                         return ret;
2772         }
2773
2774         return 0;
2775 }
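/*
 * For example: a new request reading an object that the blitter is still
 * writing must wait for (or semaphore-sync to) that write, whereas read-only
 * users on different engines may run concurrently; only a new write request
 * has to wait for all outstanding readers as well.
 */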
2776
2777 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2778 {
2779         u32 old_write_domain, old_read_domains;
2780
2781         /* Force a pagefault for domain tracking on next user access */
2782         i915_gem_release_mmap(obj);
2783
2784         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2785                 return;
2786
2787         old_read_domains = obj->base.read_domains;
2788         old_write_domain = obj->base.write_domain;
2789
2790         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2791         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2792
2793         trace_i915_gem_object_change_domain(obj,
2794                                             old_read_domains,
2795                                             old_write_domain);
2796 }
2797
2798 static void __i915_vma_iounmap(struct i915_vma *vma)
2799 {
2800         GEM_BUG_ON(vma->pin_count);
2801
2802         if (vma->iomap == NULL)
2803                 return;
2804
2805         io_mapping_unmap(vma->iomap);
2806         vma->iomap = NULL;
2807 }
2808
2809 int i915_vma_unbind(struct i915_vma *vma)
2810 {
2811         struct drm_i915_gem_object *obj = vma->obj;
2812         unsigned long active;
2813         int ret;
2814
2815         /* First wait upon any activity as retiring the request may
2816          * have side-effects such as unpinning or even unbinding this vma.
2817          */
2818         active = i915_vma_get_active(vma);
2819         if (active) {
2820                 int idx;
2821
2822                 /* When a closed VMA is retired, it is unbound - eek.
2823                  * In order to prevent it from being recursively closed,
2824                  * take a pin on the vma so that the second unbind is
2825                  * aborted.
2826                  */
2827                 vma->pin_count++;
2828
2829                 for_each_active(active, idx) {
2830                         ret = i915_gem_active_retire(&vma->last_read[idx],
2831                                                    &vma->vm->dev->struct_mutex);
2832                         if (ret)
2833                                 break;
2834                 }
2835
2836                 vma->pin_count--;
2837                 if (ret)
2838                         return ret;
2839
2840                 GEM_BUG_ON(i915_vma_is_active(vma));
2841         }
2842
2843         if (vma->pin_count)
2844                 return -EBUSY;
2845
2846         if (!drm_mm_node_allocated(&vma->node))
2847                 goto destroy;
2848
2849         GEM_BUG_ON(obj->bind_count == 0);
2850         GEM_BUG_ON(!obj->pages);
2851
2852         if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2853                 i915_gem_object_finish_gtt(obj);
2854
2855                 /* release the fence reg _after_ flushing */
2856                 ret = i915_gem_object_put_fence(obj);
2857                 if (ret)
2858                         return ret;
2859
2860                 __i915_vma_iounmap(vma);
2861         }
2862
2863         if (likely(!vma->vm->closed)) {
2864                 trace_i915_vma_unbind(vma);
2865                 vma->vm->unbind_vma(vma);
2866         }
2867         vma->bound = 0;
2868
2869         drm_mm_remove_node(&vma->node);
2870         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2871
2872         if (vma->is_ggtt) {
2873                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2874                         obj->map_and_fenceable = false;
2875                 } else if (vma->ggtt_view.pages) {
2876                         sg_free_table(vma->ggtt_view.pages);
2877                         kfree(vma->ggtt_view.pages);
2878                 }
2879                 vma->ggtt_view.pages = NULL;
2880         }
2881
2882         /* Since the unbound list is global, only move to that list if
2883          * no more VMAs exist. */
2884         if (--obj->bind_count == 0)
2885                 list_move_tail(&obj->global_list,
2886                                &to_i915(obj->base.dev)->mm.unbound_list);
2887
2888         /* And finally now the object is completely decoupled from this vma,
2889          * we can drop its hold on the backing storage and allow it to be
2890          * reaped by the shrinker.
2891          */
2892         i915_gem_object_unpin_pages(obj);
2893
2894 destroy:
2895         if (unlikely(vma->closed))
2896                 i915_vma_destroy(vma);
2897
2898         return 0;
2899 }
2900
2901 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
2902 {
2903         struct intel_engine_cs *engine;
2904         int ret;
2905
2906         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2907
2908         for_each_engine(engine, dev_priv) {
2909                 if (engine->last_context == NULL)
2910                         continue;
2911
2912                 ret = intel_engine_idle(engine);
2913                 if (ret)
2914                         return ret;
2915         }
2916
2917         return 0;
2918 }
2919
2920 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2921                                      unsigned long cache_level)
2922 {
2923         struct drm_mm_node *gtt_space = &vma->node;
2924         struct drm_mm_node *other;
2925
2926         /*
2927          * On some machines we have to be careful when putting differing types
2928          * of snoopable memory together to avoid the prefetcher crossing memory
2929          * domains and dying. During vm initialisation, we decide whether or not
2930          * these constraints apply and set the drm_mm.color_adjust
2931          * appropriately.
2932          */
2933         if (vma->vm->mm.color_adjust == NULL)
2934                 return true;
2935
2936         if (!drm_mm_node_allocated(gtt_space))
2937                 return true;
2938
2939         if (list_empty(&gtt_space->node_list))
2940                 return true;
2941
2942         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2943         if (other->allocated && !other->hole_follows && other->color != cache_level)
2944                 return false;
2945
2946         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2947         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2948                 return false;
2949
2950         return true;
2951 }
2952
2953 /**
2954  * Finds free space in the GTT aperture and binds the object or a view of it
2955  * there.
2956  * @obj: object to bind
2957  * @vm: address space to bind into
2958  * @ggtt_view: global gtt view if applicable
2959  * @alignment: requested alignment
2960  * @flags: mask of PIN_* flags to use
2961  */
2962 static struct i915_vma *
2963 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
2964                            struct i915_address_space *vm,
2965                            const struct i915_ggtt_view *ggtt_view,
2966                            unsigned alignment,
2967                            uint64_t flags)
2968 {
2969         struct drm_device *dev = obj->base.dev;
2970         struct drm_i915_private *dev_priv = to_i915(dev);
2971         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2972         u32 fence_alignment, unfenced_alignment;
2973         u32 search_flag, alloc_flag;
2974         u64 start, end;
2975         u64 size, fence_size;
2976         struct i915_vma *vma;
2977         int ret;
2978
2979         if (i915_is_ggtt(vm)) {
2980                 u32 view_size;
2981
2982                 if (WARN_ON(!ggtt_view))
2983                         return ERR_PTR(-EINVAL);
2984
2985                 view_size = i915_ggtt_view_size(obj, ggtt_view);
2986
2987                 fence_size = i915_gem_get_gtt_size(dev,
2988                                                    view_size,
2989                                                    obj->tiling_mode);
2990                 fence_alignment = i915_gem_get_gtt_alignment(dev,
2991                                                              view_size,
2992                                                              obj->tiling_mode,
2993                                                              true);
2994                 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
2995                                                                 view_size,
2996                                                                 obj->tiling_mode,
2997                                                                 false);
2998                 size = flags & PIN_MAPPABLE ? fence_size : view_size;
2999         } else {
3000                 fence_size = i915_gem_get_gtt_size(dev,
3001                                                    obj->base.size,
3002                                                    obj->tiling_mode);
3003                 fence_alignment = i915_gem_get_gtt_alignment(dev,
3004                                                              obj->base.size,
3005                                                              obj->tiling_mode,
3006                                                              true);
3007                 unfenced_alignment =
3008                         i915_gem_get_gtt_alignment(dev,
3009                                                    obj->base.size,
3010                                                    obj->tiling_mode,
3011                                                    false);
3012                 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3013         }
3014
3015         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3016         end = vm->total;
3017         if (flags & PIN_MAPPABLE)
3018                 end = min_t(u64, end, ggtt->mappable_end);
3019         if (flags & PIN_ZONE_4G)
3020                 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3021
3022         if (alignment == 0)
3023                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3024                                                 unfenced_alignment;
3025         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3026                 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
3027                           ggtt_view ? ggtt_view->type : 0,
3028                           alignment);
3029                 return ERR_PTR(-EINVAL);
3030         }
3031
3032         /* If binding the object/GGTT view requires more space than the entire
3033          * aperture has, reject it early before evicting everything in a vain
3034          * attempt to find space.
3035          */
3036         if (size > end) {
3037                 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
3038                           ggtt_view ? ggtt_view->type : 0,
3039                           size,
3040                           flags & PIN_MAPPABLE ? "mappable" : "total",
3041                           end);
3042                 return ERR_PTR(-E2BIG);
3043         }
3044
3045         ret = i915_gem_object_get_pages(obj);
3046         if (ret)
3047                 return ERR_PTR(ret);
3048
3049         i915_gem_object_pin_pages(obj);
3050
3051         vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3052                           i915_gem_obj_lookup_or_create_vma(obj, vm);
3053
3054         if (IS_ERR(vma))
3055                 goto err_unpin;
3056
3057         if (flags & PIN_OFFSET_FIXED) {
3058                 uint64_t offset = flags & PIN_OFFSET_MASK;
3059
3060                 if (offset & (alignment - 1) || offset + size > end) {
3061                         ret = -EINVAL;
3062                         goto err_vma;
3063                 }
3064                 vma->node.start = offset;
3065                 vma->node.size = size;
3066                 vma->node.color = obj->cache_level;
3067                 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3068                 if (ret) {
3069                         ret = i915_gem_evict_for_vma(vma);
3070                         if (ret == 0)
3071                                 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3072                 }
3073                 if (ret)
3074                         goto err_vma;
3075         } else {
3076                 if (flags & PIN_HIGH) {
3077                         search_flag = DRM_MM_SEARCH_BELOW;
3078                         alloc_flag = DRM_MM_CREATE_TOP;
3079                 } else {
3080                         search_flag = DRM_MM_SEARCH_DEFAULT;
3081                         alloc_flag = DRM_MM_CREATE_DEFAULT;
3082                 }
3083
3084 search_free:
3085                 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3086                                                           size, alignment,
3087                                                           obj->cache_level,
3088                                                           start, end,
3089                                                           search_flag,
3090                                                           alloc_flag);
3091                 if (ret) {
3092                         ret = i915_gem_evict_something(dev, vm, size, alignment,
3093                                                        obj->cache_level,
3094                                                        start, end,
3095                                                        flags);
3096                         if (ret == 0)
3097                                 goto search_free;
3098
3099                         goto err_vma;
3100                 }
3101         }
3102         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3103                 ret = -EINVAL;
3104                 goto err_remove_node;
3105         }
3106
3107         trace_i915_vma_bind(vma, flags);
3108         ret = i915_vma_bind(vma, obj->cache_level, flags);
3109         if (ret)
3110                 goto err_remove_node;
3111
3112         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3113         list_move_tail(&vma->vm_link, &vm->inactive_list);
3114         obj->bind_count++;
3115
3116         return vma;
3117
3118 err_remove_node:
3119         drm_mm_remove_node(&vma->node);
3120 err_vma:
3121         vma = ERR_PTR(ret);
3122 err_unpin:
3123         i915_gem_object_unpin_pages(obj);
3124         return vma;
3125 }
3126
3127 bool
3128 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3129                         bool force)
3130 {
3131         /* If we don't have a page list set up, then we're not pinned
3132          * to GPU, and we can ignore the cache flush because it'll happen
3133          * again at bind time.
3134          */
3135         if (obj->pages == NULL)
3136                 return false;
3137
3138         /*
3139          * Stolen memory is always coherent with the GPU as it is explicitly
3140          * marked as wc by the system, or the system is cache-coherent.
3141          */
3142         if (obj->stolen || obj->phys_handle)
3143                 return false;
3144
3145         /* If the GPU is snooping the contents of the CPU cache,
3146          * we do not need to manually clear the CPU cache lines.  However,
3147          * the caches are only snooped when the render cache is
3148          * flushed/invalidated.  As we always have to emit invalidations
3149          * and flushes when moving into and out of the RENDER domain, correct
3150          * snooping behaviour occurs naturally as the result of our domain
3151          * tracking.
3152          */
3153         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3154                 obj->cache_dirty = true;
3155                 return false;
3156         }
3157
3158         trace_i915_gem_object_clflush(obj);
3159         drm_clflush_sg(obj->pages);
3160         obj->cache_dirty = false;
3161
3162         return true;
3163 }
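
/*
 * The boolean return tells the caller whether any cachelines were actually
 * flushed, so the chipset flush can be skipped when nothing was dirty.
 * A typical pairing, as used by the CPU write-domain flush below (sketch):
 *
 *	if (i915_gem_clflush_object(obj, false))
 *		i915_gem_chipset_flush(to_i915(obj->base.dev));
 */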
3164
3165 /** Flushes the GTT write domain for the object if it's dirty. */
3166 static void
3167 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3168 {
3169         uint32_t old_write_domain;
3170
3171         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3172                 return;
3173
3174         /* No actual flushing is required for the GTT write domain.  Writes
3175          * to it immediately go to main memory as far as we know, so there's
3176          * no chipset flush.  It also doesn't land in render cache.
3177          *
3178          * However, we do have to enforce the order so that all writes through
3179          * the GTT land before any writes to the device, such as updates to
3180          * the GATT itself.
3181          */
3182         wmb();
3183
3184         old_write_domain = obj->base.write_domain;
3185         obj->base.write_domain = 0;
3186
3187         intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3188
3189         trace_i915_gem_object_change_domain(obj,
3190                                             obj->base.read_domains,
3191                                             old_write_domain);
3192 }
3193
3194 /** Flushes the CPU write domain for the object if it's dirty. */
3195 static void
3196 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3197 {
3198         uint32_t old_write_domain;
3199
3200         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3201                 return;
3202
3203         if (i915_gem_clflush_object(obj, obj->pin_display))
3204                 i915_gem_chipset_flush(to_i915(obj->base.dev));
3205
3206         old_write_domain = obj->base.write_domain;
3207         obj->base.write_domain = 0;
3208
3209         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3210
3211         trace_i915_gem_object_change_domain(obj,
3212                                             obj->base.read_domains,
3213                                             old_write_domain);
3214 }
3215
3216 /**
3217  * Moves a single object to the GTT read, and possibly write domain.
3218  * @obj: object to act on
3219  * @write: ask for write access or read only
3220  *
3221  * This function returns when the move is complete, including waiting on
3222  * flushes to occur.
3223  */
3224 int
3225 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3226 {
3227         uint32_t old_write_domain, old_read_domains;
3228         struct i915_vma *vma;
3229         int ret;
3230
3231         ret = i915_gem_object_wait_rendering(obj, !write);
3232         if (ret)
3233                 return ret;
3234
3235         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3236                 return 0;
3237
3238         /* Flush and acquire obj->pages so that we are coherent through
3239          * direct access in memory with previous cached writes through
3240          * shmemfs and that our cache domain tracking remains valid.
3241          * For example, if the obj->filp was moved to swap without us
3242          * being notified and releasing the pages, we would mistakenly
3243          * continue to assume that the obj remained out of the CPU cached
3244          * domain.
3245          */
3246         ret = i915_gem_object_get_pages(obj);
3247         if (ret)
3248                 return ret;
3249
3250         i915_gem_object_flush_cpu_write_domain(obj);
3251
3252         /* Serialise direct access to this object with the barriers for
3253          * coherent writes from the GPU, by effectively invalidating the
3254          * GTT domain upon first access.
3255          */
3256         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3257                 mb();
3258
3259         old_write_domain = obj->base.write_domain;
3260         old_read_domains = obj->base.read_domains;
3261
3262         /* It should now be out of any other write domains, and we can update
3263          * the domain values for our changes.
3264          */
3265         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3266         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3267         if (write) {
3268                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3269                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3270                 obj->dirty = 1;
3271         }
3272
3273         trace_i915_gem_object_change_domain(obj,
3274                                             old_read_domains,
3275                                             old_write_domain);
3276
3277         /* And bump the LRU for this access */
3278         vma = i915_gem_obj_to_ggtt(obj);
3279         if (vma &&
3280             drm_mm_node_allocated(&vma->node) &&
3281             !i915_vma_is_active(vma))
3282                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3283
3284         return 0;
3285 }
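
/*
 * A minimal caller sketch (struct_mutex held, reference on the object held,
 * error handling elided):
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write the object through a GTT mapping ...
 *
 * Passing write=false only waits for outstanding GPU writes rather than
 * all rendering, which is cheaper for read-only access.
 */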
3286
3287 /**
3288  * Changes the cache-level of an object across all VMA.
3289  * @obj: object to act on
3290  * @cache_level: new cache level to set for the object
3291  *
3292  * After this function returns, the object will be in the new cache-level
3293  * across all GTT and the contents of the backing storage will be coherent,
3294  * with respect to the new cache-level. In order to keep the backing storage
3295  * coherent for all users, we only allow a single cache level to be set
3296  * globally on the object and prevent it from being changed whilst the
3297  * hardware is reading from the object. That is if the object is currently
3298          * hardware is reading from the object. That is, if the object is currently
3299          * on the scanout, it will be set to uncached (or equivalent display
3300  * that all direct access to the scanout remains coherent.
3301  */
3302 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3303                                     enum i915_cache_level cache_level)
3304 {
3305         struct i915_vma *vma;
3306         int ret = 0;
3307
3308         if (obj->cache_level == cache_level)
3309                 goto out;
3310
3311         /* Inspect the list of currently bound VMA and unbind any that would
3312          * be invalid given the new cache-level. This is principally to
3313          * catch the issue of the CS prefetch crossing page boundaries and
3314          * reading an invalid PTE on older architectures.
3315          */
3316 restart:
3317         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3318                 if (!drm_mm_node_allocated(&vma->node))
3319                         continue;
3320
3321                 if (vma->pin_count) {
3322                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3323                         return -EBUSY;
3324                 }
3325
3326                 if (i915_gem_valid_gtt_space(vma, cache_level))
3327                         continue;
3328
3329                 ret = i915_vma_unbind(vma);
3330                 if (ret)
3331                         return ret;
3332
3333                 /* As unbinding may affect other elements in the
3334                  * obj->vma_list (due to side-effects from retiring
3335                  * an active vma), play safe and restart the iterator.
3336                  */
3337                 goto restart;
3338         }
3339
3340         /* We can reuse the existing drm_mm nodes but need to change the
3341          * cache-level on the PTE. We could simply unbind them all and
3342          * rebind with the correct cache-level on next use. However since
3343          * we already have a valid slot, dma mapping, pages etc, we may as
3344          * rewrite the PTE in the belief that doing so tramples upon less
3345          * we already have a valid slot, dma mapping, pages etc., we may as well
3346          */
3347         if (obj->bind_count) {
3348                 /* Before we change the PTE, the GPU must not be accessing it.
3349                  * If we wait upon the object, we know that all the bound
3350                  * VMA are no longer active.
3351                  */
3352                 ret = i915_gem_object_wait_rendering(obj, false);
3353                 if (ret)
3354                         return ret;
3355
3356                 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3357                         /* Access to snoopable pages through the GTT is
3358                          * incoherent and on some machines causes a hard
3359                          * lockup. Relinquish the CPU mmaping to force
3360          * lockup. Relinquish the CPU mmapping to force
3361                          * then double check if the GTT mapping is still
3362                          * valid for that pointer access.
3363                          */
3364                         i915_gem_release_mmap(obj);
3365
3366                         /* As we no longer need a fence for GTT access,
3367                          * we can relinquish it now (and so prevent having
3368                          * to steal a fence from someone else on the next
3369                          * fence request). Note GPU activity would have
3370                          * dropped the fence as all snoopable access is
3371                          * supposed to be linear.
3372                          */
3373                         ret = i915_gem_object_put_fence(obj);
3374                         if (ret)
3375                                 return ret;
3376                 } else {
3377                         /* We either have incoherent backing store and
3378                          * so no GTT access or the architecture is fully
3379                          * coherent. In such cases, existing GTT mmaps
3380                          * ignore the cache bit in the PTE and we can
3381                          * rewrite it without confusing the GPU or having
3382                          * to force userspace to fault back in its mmaps.
3383                          */
3384                 }
3385
3386                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3387                         if (!drm_mm_node_allocated(&vma->node))
3388                                 continue;
3389
3390                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3391                         if (ret)
3392                                 return ret;
3393                 }
3394         }
3395
3396         list_for_each_entry(vma, &obj->vma_list, obj_link)
3397                 vma->node.color = cache_level;
3398         obj->cache_level = cache_level;
3399
3400 out:
3401         /* Flush the dirty CPU caches to the backing storage so that the
3402          * object is now coherent at its new cache level (with respect
3403          * to the access domain).
3404          */
3405         if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3406                 if (i915_gem_clflush_object(obj, true))
3407                         i915_gem_chipset_flush(to_i915(obj->base.dev));
3408         }
3409
3410         return 0;
3411 }
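
/*
 * For example, making an object safe for scanout on a platform without
 * write-through caching boils down to (sketch; the display path below
 * wraps this together with pinning):
 *
 *	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
 *	if (ret)
 *		return ret;
 */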
3412
3413 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3414                                struct drm_file *file)
3415 {
3416         struct drm_i915_gem_caching *args = data;
3417         struct drm_i915_gem_object *obj;
3418
3419         obj = i915_gem_object_lookup(file, args->handle);
3420         if (!obj)
3421                 return -ENOENT;
3422
3423         switch (obj->cache_level) {
3424         case I915_CACHE_LLC:
3425         case I915_CACHE_L3_LLC:
3426                 args->caching = I915_CACHING_CACHED;
3427                 break;
3428
3429         case I915_CACHE_WT:
3430                 args->caching = I915_CACHING_DISPLAY;
3431                 break;
3432
3433         default:
3434                 args->caching = I915_CACHING_NONE;
3435                 break;
3436         }
3437
3438         i915_gem_object_put_unlocked(obj);
3439         return 0;
3440 }
3441
3442 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3443                                struct drm_file *file)
3444 {
3445         struct drm_i915_private *dev_priv = to_i915(dev);
3446         struct drm_i915_gem_caching *args = data;
3447         struct drm_i915_gem_object *obj;
3448         enum i915_cache_level level;
3449         int ret;
3450
3451         switch (args->caching) {
3452         case I915_CACHING_NONE:
3453                 level = I915_CACHE_NONE;
3454                 break;
3455         case I915_CACHING_CACHED:
3456                 /*
3457                  * Due to a HW issue on BXT A stepping, GPU stores via a
3458                  * snooped mapping may leave stale data in a corresponding CPU
3459                  * cacheline, whereas normally such cachelines would get
3460                  * invalidated.
3461                  */
3462                 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3463                         return -ENODEV;
3464
3465                 level = I915_CACHE_LLC;
3466                 break;
3467         case I915_CACHING_DISPLAY:
3468                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3469                 break;
3470         default:
3471                 return -EINVAL;
3472         }
3473
3474         intel_runtime_pm_get(dev_priv);
3475
3476         ret = i915_mutex_lock_interruptible(dev);
3477         if (ret)
3478                 goto rpm_put;
3479
3480         obj = i915_gem_object_lookup(file, args->handle);
3481         if (!obj) {
3482                 ret = -ENOENT;
3483                 goto unlock;
3484         }
3485
3486         ret = i915_gem_object_set_cache_level(obj, level);
3487
3488         i915_gem_object_put(obj);
3489 unlock:
3490         mutex_unlock(&dev->struct_mutex);
3491 rpm_put:
3492         intel_runtime_pm_put(dev_priv);
3493
3494         return ret;
3495 }
3496
3497 /*
3498  * Prepare buffer for display plane (scanout, cursors, etc).
3499  * Can be called from an uninterruptible phase (modesetting) and allows
3500  * any flushes to be pipelined (for pageflips).
3501  */
3502 int
3503 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3504                                      u32 alignment,
3505                                      const struct i915_ggtt_view *view)
3506 {
3507         u32 old_read_domains, old_write_domain;
3508         int ret;
3509
3510         /* Mark the pin_display early so that we account for the
3511          * display coherency whilst setting up the cache domains.
3512          */
3513         obj->pin_display++;
3514
3515         /* The display engine is not coherent with the LLC cache on gen6.  As
3516          * a result, we make sure that the pinning that is about to occur is
3517          * done with uncached PTEs. This is the lowest common denominator for all
3518          * chipsets.
3519          *
3520          * However for gen6+, we could do better by using the GFDT bit instead
3521          * of uncaching, which would allow us to flush all the LLC-cached data
3522          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3523          */
3524         ret = i915_gem_object_set_cache_level(obj,
3525                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3526         if (ret)
3527                 goto err_unpin_display;
3528
3529         /* As the user may map the buffer once pinned in the display plane
3530          * (e.g. libkms for the bootup splash), we have to ensure that we
3531          * always use map_and_fenceable for all scanout buffers.
3532          */
3533         ret = i915_gem_object_ggtt_pin(obj, view, alignment,
3534                                        view->type == I915_GGTT_VIEW_NORMAL ?
3535                                        PIN_MAPPABLE : 0);
3536         if (ret)
3537                 goto err_unpin_display;
3538
3539         i915_gem_object_flush_cpu_write_domain(obj);
3540
3541         old_write_domain = obj->base.write_domain;
3542         old_read_domains = obj->base.read_domains;
3543
3544         /* It should now be out of any other write domains, and we can update
3545          * the domain values for our changes.
3546          */
3547         obj->base.write_domain = 0;
3548         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3549
3550         trace_i915_gem_object_change_domain(obj,
3551                                             old_read_domains,
3552                                             old_write_domain);
3553
3554         return 0;
3555
3556 err_unpin_display:
3557         obj->pin_display--;
3558         return ret;
3559 }
3560
3561 void
3562 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3563                                          const struct i915_ggtt_view *view)
3564 {
3565         if (WARN_ON(obj->pin_display == 0))
3566                 return;
3567
3568         i915_gem_object_ggtt_unpin_view(obj, view);
3569
3570         obj->pin_display--;
3571 }
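
/*
 * Pin and unpin must be balanced around the lifetime of the scanout. A
 * sketch of a user, where @view is whichever GGTT view the plane needs
 * (typically &i915_ggtt_view_normal):
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, 0, view);
 *	if (ret)
 *		return ret;
 *	... scan out from the pinned GGTT offset ...
 *	i915_gem_object_unpin_from_display_plane(obj, view);
 */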
3572
3573 /**
3574  * Moves a single object to the CPU read, and possibly write domain.
3575  * @obj: object to act on
3576  * @write: requesting write or read-only access
3577  *
3578  * This function returns when the move is complete, including waiting on
3579  * flushes to occur.
3580  */
3581 int
3582 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3583 {
3584         uint32_t old_write_domain, old_read_domains;
3585         int ret;
3586
3587         ret = i915_gem_object_wait_rendering(obj, !write);
3588         if (ret)
3589                 return ret;
3590
3591         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3592                 return 0;
3593
3594         i915_gem_object_flush_gtt_write_domain(obj);
3595
3596         old_write_domain = obj->base.write_domain;
3597         old_read_domains = obj->base.read_domains;
3598
3599         /* Flush the CPU cache if it's still invalid. */
3600         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3601                 i915_gem_clflush_object(obj, false);
3602
3603                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3604         }
3605
3606         /* It should now be out of any other write domains, and we can update
3607          * the domain values for our changes.
3608          */
3609         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3610
3611         /* If we're writing through the CPU, then the GPU read domains will
3612          * need to be invalidated at next use.
3613          */
3614         if (write) {
3615                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3616                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3617         }
3618
3619         trace_i915_gem_object_change_domain(obj,
3620                                             old_read_domains,
3621                                             old_write_domain);
3622
3623         return 0;
3624 }
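
/*
 * Typical use is preparing an object for direct CPU access (sketch,
 * struct_mutex held):
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes; the dirty cachelines are flushed again when
 *	    the object next moves out of the CPU domain ...
 */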
3625
3626 /* Throttle our rendering by waiting until the ring has completed our requests
3627  * emitted over 20 msec ago.
3628  *
3629  * Note that if we were to use the current jiffies each time around the loop,
3630  * we wouldn't escape the function with any frames outstanding if the time to
3631  * render a frame was over 20ms.
3632  *
3633  * This should get us reasonable parallelism between CPU and GPU but also
3634  * relatively low latency when blocking on a particular request to finish.
3635  */
3636 static int
3637 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3638 {
3639         struct drm_i915_private *dev_priv = to_i915(dev);
3640         struct drm_i915_file_private *file_priv = file->driver_priv;
3641         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3642         struct drm_i915_gem_request *request, *target = NULL;
3643         int ret;
3644
3645         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3646         if (ret)
3647                 return ret;
3648
3649         /* ABI: return -EIO if already wedged */
3650         if (i915_terminally_wedged(&dev_priv->gpu_error))
3651                 return -EIO;
3652
3653         spin_lock(&file_priv->mm.lock);
3654         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3655                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3656                         break;
3657
3658                 /*
3659                  * Note that the request might not have been submitted yet,
3660                  * in which case emitted_jiffies will be zero.
3661                  */
3662                 if (!request->emitted_jiffies)
3663                         continue;
3664
3665                 target = request;
3666         }
3667         if (target)
3668                 i915_gem_request_get(target);
3669         spin_unlock(&file_priv->mm.lock);
3670
3671         if (target == NULL)
3672                 return 0;
3673
3674         ret = i915_wait_request(target, true, NULL, NULL);
3675         i915_gem_request_put(target);
3676
3677         return ret;
3678 }
3679
3680 static bool
3681 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
3682 {
3683         struct drm_i915_gem_object *obj = vma->obj;
3684
3685         if (alignment &&
3686             vma->node.start & (alignment - 1))
3687                 return true;
3688
3689         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3690                 return true;
3691
3692         if (flags & PIN_OFFSET_BIAS &&
3693             vma->node.start < (flags & PIN_OFFSET_MASK))
3694                 return true;
3695
3696         if (flags & PIN_OFFSET_FIXED &&
3697             vma->node.start != (flags & PIN_OFFSET_MASK))
3698                 return true;
3699
3700         return false;
3701 }
3702
3703 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3704 {
3705         struct drm_i915_gem_object *obj = vma->obj;
3706         bool mappable, fenceable;
3707         u32 fence_size, fence_alignment;
3708
3709         fence_size = i915_gem_get_gtt_size(obj->base.dev,
3710                                            obj->base.size,
3711                                            obj->tiling_mode);
3712         fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
3713                                                      obj->base.size,
3714                                                      obj->tiling_mode,
3715                                                      true);
3716
3717         fenceable = (vma->node.size == fence_size &&
3718                      (vma->node.start & (fence_alignment - 1)) == 0);
3719
3720         mappable = (vma->node.start + fence_size <=
3721                     to_i915(obj->base.dev)->ggtt.mappable_end);
3722
3723         obj->map_and_fenceable = mappable && fenceable;
3724 }
3725
3726 static int
3727 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
3728                        struct i915_address_space *vm,
3729                        const struct i915_ggtt_view *ggtt_view,
3730                        uint32_t alignment,
3731                        uint64_t flags)
3732 {
3733         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3734         struct i915_vma *vma;
3735         unsigned bound;
3736         int ret;
3737
3738         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3739                 return -ENODEV;
3740
3741         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3742                 return -EINVAL;
3743
3744         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
3745                 return -EINVAL;
3746
3747         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3748                 return -EINVAL;
3749
3750         vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
3751                           i915_gem_obj_to_vma(obj, vm);
3752
3753         if (vma) {
3754                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3755                         return -EBUSY;
3756
3757                 if (i915_vma_misplaced(vma, alignment, flags)) {
3758                         WARN(vma->pin_count,
3759                              "bo is already pinned in %s with incorrect alignment:"
3760                              " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
3761                              " obj->map_and_fenceable=%d\n",
3762                              ggtt_view ? "ggtt" : "ppgtt",
3763                              upper_32_bits(vma->node.start),
3764                              lower_32_bits(vma->node.start),
3765                              alignment,
3766                              !!(flags & PIN_MAPPABLE),
3767                              obj->map_and_fenceable);
3768                         ret = i915_vma_unbind(vma);
3769                         if (ret)
3770                                 return ret;
3771
3772                         vma = NULL;
3773                 }
3774         }
3775
3776         bound = vma ? vma->bound : 0;
3777         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3778                 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
3779                                                  flags);
3780                 if (IS_ERR(vma))
3781                         return PTR_ERR(vma);
3782         } else {
3783                 ret = i915_vma_bind(vma, obj->cache_level, flags);
3784                 if (ret)
3785                         return ret;
3786         }
3787
3788         if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
3789             (bound ^ vma->bound) & GLOBAL_BIND) {
3790                 __i915_vma_set_map_and_fenceable(vma);
3791                 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3792         }
3793
3794         vma->pin_count++;
3795         return 0;
3796 }
3797
3798 int
3799 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3800                     struct i915_address_space *vm,
3801                     uint32_t alignment,
3802                     uint64_t flags)
3803 {
3804         return i915_gem_object_do_pin(obj, vm,
3805                                       i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
3806                                       alignment, flags);
3807 }
3808
3809 int
3810 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3811                          const struct i915_ggtt_view *view,
3812                          uint32_t alignment,
3813                          uint64_t flags)
3814 {
3815         struct drm_device *dev = obj->base.dev;
3816         struct drm_i915_private *dev_priv = to_i915(dev);
3817         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3818
3819         BUG_ON(!view);
3820
3821         return i915_gem_object_do_pin(obj, &ggtt->base, view,
3822                                       alignment, flags | PIN_GLOBAL);
3823 }
3824
3825 void
3826 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3827                                 const struct i915_ggtt_view *view)
3828 {
3829         struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
3830
3831         WARN_ON(vma->pin_count == 0);
3832         WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
3833
3834         --vma->pin_count;
3835 }
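
/*
 * As with the display-plane helpers above, GGTT pins must be balanced.
 * A caller needing a temporary mappable binding might do (sketch):
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
 *				       0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *	... access the object through the aperture ...
 *	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 */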
3836
3837 int
3838 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3839                     struct drm_file *file)
3840 {
3841         struct drm_i915_gem_busy *args = data;
3842         struct drm_i915_gem_object *obj;
3843         int ret;
3844
3845         ret = i915_mutex_lock_interruptible(dev);
3846         if (ret)
3847                 return ret;
3848
3849         obj = i915_gem_object_lookup(file, args->handle);
3850         if (!obj) {
3851                 ret = -ENOENT;
3852                 goto unlock;
3853         }
3854
3855         /* Count all active objects as busy, even if they are currently not used
3856          * by the gpu. Users of this interface expect objects to eventually
3857          * become non-busy without any further actions.
3858          */
3859         args->busy = 0;
3860         if (obj->active) {
3861                 struct drm_i915_gem_request *req;
3862                 int i;
3863
3864                 for (i = 0; i < I915_NUM_ENGINES; i++) {
3865                         req = i915_gem_active_peek(&obj->last_read[i],
3866                                                    &obj->base.dev->struct_mutex);
3867                         if (req)
3868                                 args->busy |= 1 << (16 + req->engine->exec_id);
3869                 }
3870                 req = i915_gem_active_peek(&obj->last_write,
3871                                            &obj->base.dev->struct_mutex);
3872                 if (req)
3873                         args->busy |= req->engine->exec_id;
3874         }
3875
3876         i915_gem_object_put(obj);
3877 unlock:
3878         mutex_unlock(&dev->struct_mutex);
3879         return ret;
3880 }
3881
3882 int
3883 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3884                         struct drm_file *file_priv)
3885 {
3886         return i915_gem_ring_throttle(dev, file_priv);
3887 }
3888
3889 int
3890 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3891                        struct drm_file *file_priv)
3892 {
3893         struct drm_i915_private *dev_priv = to_i915(dev);
3894         struct drm_i915_gem_madvise *args = data;
3895         struct drm_i915_gem_object *obj;
3896         int ret;
3897
3898         switch (args->madv) {
3899         case I915_MADV_DONTNEED:
3900         case I915_MADV_WILLNEED:
3901             break;
3902         default:
3903             return -EINVAL;
3904         }
3905
3906         ret = i915_mutex_lock_interruptible(dev);
3907         if (ret)
3908                 return ret;
3909
3910         obj = i915_gem_object_lookup(file_priv, args->handle);
3911         if (!obj) {
3912                 ret = -ENOENT;
3913                 goto unlock;
3914         }
3915
3916         if (i915_gem_obj_is_pinned(obj)) {
3917                 ret = -EINVAL;
3918                 goto out;
3919         }
3920
3921         if (obj->pages &&
3922             obj->tiling_mode != I915_TILING_NONE &&
3923             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3924                 if (obj->madv == I915_MADV_WILLNEED)
3925                         i915_gem_object_unpin_pages(obj);
3926                 if (args->madv == I915_MADV_WILLNEED)
3927                         i915_gem_object_pin_pages(obj);
3928         }
3929
3930         if (obj->madv != __I915_MADV_PURGED)
3931                 obj->madv = args->madv;
3932
3933         /* if the object is no longer attached, discard its backing storage */
3934         if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
3935                 i915_gem_object_truncate(obj);
3936
3937         args->retained = obj->madv != __I915_MADV_PURGED;
3938
3939 out:
3940         i915_gem_object_put(obj);
3941 unlock:
3942         mutex_unlock(&dev->struct_mutex);
3943         return ret;
3944 }
3945
3946 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3947                           const struct drm_i915_gem_object_ops *ops)
3948 {
3949         int i;
3950
3951         INIT_LIST_HEAD(&obj->global_list);
3952         for (i = 0; i < I915_NUM_ENGINES; i++)
3953                 init_request_active(&obj->last_read[i],
3954                                     i915_gem_object_retire__read);
3955         init_request_active(&obj->last_write,
3956                             i915_gem_object_retire__write);
3957         init_request_active(&obj->last_fence, NULL);
3958         INIT_LIST_HEAD(&obj->obj_exec_link);
3959         INIT_LIST_HEAD(&obj->vma_list);
3960         INIT_LIST_HEAD(&obj->batch_pool_link);
3961
3962         obj->ops = ops;
3963
3964         obj->fence_reg = I915_FENCE_REG_NONE;
3965         obj->madv = I915_MADV_WILLNEED;
3966
3967         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3968 }
3969
3970 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3971         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
3972         .get_pages = i915_gem_object_get_pages_gtt,
3973         .put_pages = i915_gem_object_put_pages_gtt,
3974 };
3975
3976 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
3977                                                   size_t size)
3978 {
3979         struct drm_i915_gem_object *obj;
3980         struct address_space *mapping;
3981         gfp_t mask;
3982         int ret;
3983
3984         obj = i915_gem_object_alloc(dev);
3985         if (obj == NULL)
3986                 return ERR_PTR(-ENOMEM);
3987
3988         ret = drm_gem_object_init(dev, &obj->base, size);
3989         if (ret)
3990                 goto fail;
3991
3992         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3993         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3994                 /* 965gm cannot relocate objects above 4GiB. */
3995                 mask &= ~__GFP_HIGHMEM;
3996                 mask |= __GFP_DMA32;
3997         }
3998
3999         mapping = file_inode(obj->base.filp)->i_mapping;
4000         mapping_set_gfp_mask(mapping, mask);
4001
4002         i915_gem_object_init(obj, &i915_gem_object_ops);
4003
4004         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4005         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4006
4007         if (HAS_LLC(dev)) {
4008                 /* On some devices, we can have the GPU use the LLC (the CPU
4009                  * cache) for about a 10% performance improvement
4010                  * compared to uncached.  Graphics requests other than
4011                  * display scanout are coherent with the CPU in
4012                  * accessing this cache.  This means in this mode we
4013                  * don't need to clflush on the CPU side, and on the
4014                  * GPU side we only need to flush internal caches to
4015                  * get data visible to the CPU.
4016                  *
4017                  * However, we maintain the display planes as UC, and so
4018                  * need to rebind when first used as such.
4019                  */
4020                 obj->cache_level = I915_CACHE_LLC;
4021         } else
4022                 obj->cache_level = I915_CACHE_NONE;
4023
4024         trace_i915_gem_object_create(obj);
4025
4026         return obj;
4027
4028 fail:
4029         i915_gem_object_free(obj);
4030
4031         return ERR_PTR(ret);
4032 }
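
/*
 * Note the ERR_PTR return convention: callers must test with IS_ERR()
 * rather than against NULL. Creating and binding a small internal buffer
 * then follows the usual pattern (sketch, struct_mutex held):
 *
 *	obj = i915_gem_object_create(dev, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 0, 0);
 */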
4033
4034 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4035 {
4036         /* If we are the last user of the backing storage (be it shmemfs
4037          * pages or stolen etc), we know that the pages are going to be
4038          * immediately released. In this case, we can then skip copying
4039          * back the contents from the GPU.
4040          */
4041
4042         if (obj->madv != I915_MADV_WILLNEED)
4043                 return false;
4044
4045         if (obj->base.filp == NULL)
4046                 return true;
4047
4048         /* At first glance, this looks racy, but then again so would be
4049          * userspace racing mmap against close. However, the first external
4050          * reference to the filp can only be obtained through the
4051          * i915_gem_mmap_ioctl() which safeguards us against the user
4052          * acquiring such a reference whilst we are in the middle of
4053          * freeing the object.
4054          */
4055         return atomic_long_read(&obj->base.filp->f_count) == 1;
4056 }
4057
4058 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4059 {
4060         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4061         struct drm_device *dev = obj->base.dev;
4062         struct drm_i915_private *dev_priv = to_i915(dev);
4063         struct i915_vma *vma, *next;
4064
4065         intel_runtime_pm_get(dev_priv);
4066
4067         trace_i915_gem_object_destroy(obj);
4068
4069         /* All file-owned VMA should have been released by this point through
4070          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4071          * However, the object may also be bound into the global GTT (e.g.
4072          * older GPUs without per-process support, or for direct access through
4073          * the GTT either for the user or for scanout). Those VMA still need to
4074          * unbound now.
4075          * be unbound now.
4076         list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4077                 GEM_BUG_ON(!vma->is_ggtt);
4078                 GEM_BUG_ON(i915_vma_is_active(vma));
4079                 vma->pin_count = 0;
4080                 i915_vma_close(vma);
4081         }
4082         GEM_BUG_ON(obj->bind_count);
4083
4084         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4085          * before progressing. */
4086         if (obj->stolen)
4087                 i915_gem_object_unpin_pages(obj);
4088
4089         WARN_ON(obj->frontbuffer_bits);
4090
4091         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4092             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4093             obj->tiling_mode != I915_TILING_NONE)
4094                 i915_gem_object_unpin_pages(obj);
4095
4096         if (WARN_ON(obj->pages_pin_count))
4097                 obj->pages_pin_count = 0;
4098         if (discard_backing_storage(obj))
4099                 obj->madv = I915_MADV_DONTNEED;
4100         i915_gem_object_put_pages(obj);
4101
4102         BUG_ON(obj->pages);
4103
4104         if (obj->base.import_attach)
4105                 drm_prime_gem_destroy(&obj->base, NULL);
4106
4107         if (obj->ops->release)
4108                 obj->ops->release(obj);
4109
4110         drm_gem_object_release(&obj->base);
4111         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4112
4113         kfree(obj->bit_17);
4114         i915_gem_object_free(obj);
4115
4116         intel_runtime_pm_put(dev_priv);
4117 }
4118
4119 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4120                                      struct i915_address_space *vm)
4121 {
4122         struct i915_vma *vma;
4123         list_for_each_entry(vma, &obj->vma_list, obj_link) {
4124                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4125                     vma->vm == vm)
4126                         return vma;
4127         }
4128         return NULL;
4129 }
4130
4131 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4132                                            const struct i915_ggtt_view *view)
4133 {
4134         struct i915_vma *vma;
4135
4136         GEM_BUG_ON(!view);
4137
4138         list_for_each_entry(vma, &obj->vma_list, obj_link)
4139                 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4140                         return vma;
4141         return NULL;
4142 }
4143
4144 static void
4145 i915_gem_stop_engines(struct drm_device *dev)
4146 {
4147         struct drm_i915_private *dev_priv = to_i915(dev);
4148         struct intel_engine_cs *engine;
4149
4150         for_each_engine(engine, dev_priv)
4151                 dev_priv->gt.stop_engine(engine);
4152 }
4153
4154 int
4155 i915_gem_suspend(struct drm_device *dev)
4156 {
4157         struct drm_i915_private *dev_priv = to_i915(dev);
4158         int ret = 0;
4159
4160         intel_suspend_gt_powersave(dev_priv);
4161
4162         mutex_lock(&dev->struct_mutex);
4163
4164         /* We have to flush all the executing contexts to main memory so
4165          * that they can be saved in the hibernation image. To ensure the last
4166          * context image is coherent, we have to switch away from it. That
4167          * leaves the dev_priv->kernel_context still active when
4168          * we actually suspend, and its image in memory may not match the GPU
4169          * state. Fortunately, the kernel_context is disposable and we do
4170          * not rely on its state.
4171          */
4172         ret = i915_gem_switch_to_kernel_context(dev_priv);
4173         if (ret)
4174                 goto err;
4175
4176         ret = i915_gem_wait_for_idle(dev_priv);
4177         if (ret)
4178                 goto err;
4179
4180         i915_gem_retire_requests(dev_priv);
4181
4182         /* Note that rather than stopping the engines, all we have to do
4183          * is assert that every RING_HEAD == RING_TAIL (all execution complete)
4184          * and similar for all logical context images (to ensure they are
4185          * all ready for hibernation).
4186          */
4187         i915_gem_stop_engines(dev);
4188         i915_gem_context_lost(dev_priv);
4189         mutex_unlock(&dev->struct_mutex);
4190
4191         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4192         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4193         flush_delayed_work(&dev_priv->gt.idle_work);
4194
4195         /* Assert that we successfully flushed all the work and
4196          * reset the GPU back to its idle, low power state.
4197          */
4198         WARN_ON(dev_priv->gt.awake);
4199
4200         return 0;
4201
4202 err:
4203         mutex_unlock(&dev->struct_mutex);
4204         return ret;
4205 }
4206
4207 void i915_gem_resume(struct drm_device *dev)
4208 {
4209         struct drm_i915_private *dev_priv = to_i915(dev);
4210
4211         mutex_lock(&dev->struct_mutex);
4212         i915_gem_restore_gtt_mappings(dev);
4213
4214         /* As we didn't flush the kernel context before suspend, we cannot
4215          * guarantee that the context image is complete. So let's just reset
4216          * it and start again.
4217          */
4218         if (i915.enable_execlists)
4219                 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4220
4221         mutex_unlock(&dev->struct_mutex);
4222 }
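
/*
 * i915_gem_suspend() and i915_gem_resume() are expected to bracket a
 * suspend cycle, driven from the driver's PM hooks (sketch of the
 * ordering):
 *
 *	ret = i915_gem_suspend(dev);
 *	if (ret)
 *		return ret;
 *	... device powers down, later powers back up ...
 *	i915_gem_resume(dev);
 */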
4223
4224 void i915_gem_init_swizzling(struct drm_device *dev)
4225 {
4226         struct drm_i915_private *dev_priv = to_i915(dev);
4227
4228         if (INTEL_INFO(dev)->gen < 5 ||
4229             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4230                 return;
4231
4232         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4233                                  DISP_TILE_SURFACE_SWIZZLING);
4234
4235         if (IS_GEN5(dev))
4236                 return;
4237
4238         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4239         if (IS_GEN6(dev))
4240                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4241         else if (IS_GEN7(dev))
4242                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4243         else if (IS_GEN8(dev))
4244                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4245         else
4246                 BUG();
4247 }
4248
4249 static void init_unused_ring(struct drm_device *dev, u32 base)
4250 {
4251         struct drm_i915_private *dev_priv = to_i915(dev);
4252
4253         I915_WRITE(RING_CTL(base), 0);
4254         I915_WRITE(RING_HEAD(base), 0);
4255         I915_WRITE(RING_TAIL(base), 0);
4256         I915_WRITE(RING_START(base), 0);
4257 }
4258
4259 static void init_unused_rings(struct drm_device *dev)
4260 {
4261         if (IS_I830(dev)) {
4262                 init_unused_ring(dev, PRB1_BASE);
4263                 init_unused_ring(dev, SRB0_BASE);
4264                 init_unused_ring(dev, SRB1_BASE);
4265                 init_unused_ring(dev, SRB2_BASE);
4266                 init_unused_ring(dev, SRB3_BASE);
4267         } else if (IS_GEN2(dev)) {
4268                 init_unused_ring(dev, SRB0_BASE);
4269                 init_unused_ring(dev, SRB1_BASE);
4270         } else if (IS_GEN3(dev)) {
4271                 init_unused_ring(dev, PRB1_BASE);
4272                 init_unused_ring(dev, PRB2_BASE);
4273         }
4274 }
4275
4276 int
4277 i915_gem_init_hw(struct drm_device *dev)
4278 {
4279         struct drm_i915_private *dev_priv = to_i915(dev);
4280         struct intel_engine_cs *engine;
4281         int ret;
4282
4283         /* Double layer security blanket, see i915_gem_init() */
4284         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4285
4286         if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4287                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4288
4289         if (IS_HASWELL(dev))
4290                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4291                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4292
4293         if (HAS_PCH_NOP(dev)) {
4294                 if (IS_IVYBRIDGE(dev)) {
4295                         u32 temp = I915_READ(GEN7_MSG_CTL);
4296                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4297                         I915_WRITE(GEN7_MSG_CTL, temp);
4298                 } else if (INTEL_INFO(dev)->gen >= 7) {
4299                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4300                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4301                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4302                 }
4303         }
4304
4305         i915_gem_init_swizzling(dev);
4306
4307         /*
4308          * At least 830 can leave some of the unused rings
4309          * "active" (i.e. head != tail) after resume, which
4310          * will prevent C3 entry. Make sure all unused rings
4311          * are totally idle.
4312          */
4313         init_unused_rings(dev);
4314
4315         BUG_ON(!dev_priv->kernel_context);
4316
4317         ret = i915_ppgtt_init_hw(dev);
4318         if (ret) {
4319                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4320                 goto out;
4321         }
4322
4323         /* Need to do basic initialisation of all rings first: */
4324         for_each_engine(engine, dev_priv) {
4325                 ret = engine->init_hw(engine);
4326                 if (ret)
4327                         goto out;
4328         }
4329
4330         intel_mocs_init_l3cc_table(dev);
4331
4332         /* We can't enable contexts until all firmware is loaded */
4333         ret = intel_guc_setup(dev);
4334         if (ret)
4335                 goto out;
4336
4337 out:
4338         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4339         return ret;
4340 }
4341
4342 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4343 {
4344         if (INTEL_INFO(dev_priv)->gen < 6)
4345                 return false;
4346
4347         /* TODO: make semaphores and Execlists play nicely together */
4348         if (i915.enable_execlists)
4349                 return false;
4350
4351         if (value >= 0)
4352                 return value;
4353
4354 #ifdef CONFIG_INTEL_IOMMU
4355         /* Enable semaphores on SNB when IO remapping is off */
4356         if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4357                 return false;
4358 #endif
4359
4360         return true;
4361 }
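
/*
 * The intent is that the module parameter is sanitized once at load time,
 * presumably along the lines of (sketch; the actual call site lives
 * outside this file):
 *
 *	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
 */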
4362
4363 int i915_gem_init(struct drm_device *dev)
4364 {
4365         struct drm_i915_private *dev_priv = to_i915(dev);
4366         int ret;
4367
4368         mutex_lock(&dev->struct_mutex);
4369
4370         if (!i915.enable_execlists) {
4371                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4372                 dev_priv->gt.stop_engine = intel_engine_stop;
4373         } else {
4374                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4375                 dev_priv->gt.stop_engine = intel_logical_ring_stop;
4376         }
4377
4378         /* This is just a security blanket to placate dragons.
4379          * On some systems, we very sporadically observe that the first TLBs
4380          * used by the CS may be stale, despite us poking the TLB reset. If
4381          * we hold the forcewake during initialisation these problems
4382          * just magically go away.
4383          */
4384         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4385
4386         i915_gem_init_userptr(dev_priv);
4387
4388         ret = i915_gem_init_ggtt(dev_priv);
4389         if (ret)
4390                 goto out_unlock;
4391
4392         ret = i915_gem_context_init(dev);
4393         if (ret)
4394                 goto out_unlock;
4395
4396         ret = intel_engines_init(dev);
4397         if (ret)
4398                 goto out_unlock;
4399
4400         ret = i915_gem_init_hw(dev);
4401         if (ret == -EIO) {
4402                 /* Allow engine initialisation to fail by marking the GPU as
4403                  * wedged. But we only want to do this when the GPU is angry;
4404                  * for any other failure, such as an allocation failure, we bail.
4405                  */
4406                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4407                 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4408                 ret = 0;
4409         }
4410
4411 out_unlock:
4412         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4413         mutex_unlock(&dev->struct_mutex);
4414
4415         return ret;
4416 }
4417
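/* Tear down each engine through the backend-specific cleanup hook selected
 * in i915_gem_init() (legacy ringbuffer vs. execlists).
 */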
4418 void
4419 i915_gem_cleanup_engines(struct drm_device *dev)
4420 {
4421         struct drm_i915_private *dev_priv = to_i915(dev);
4422         struct intel_engine_cs *engine;
4423
4424         for_each_engine(engine, dev_priv)
4425                 dev_priv->gt.cleanup_engine(engine);
4426 }
4427
4428 static void
4429 init_engine_lists(struct intel_engine_cs *engine)
4430 {
4431         INIT_LIST_HEAD(&engine->request_list);
4432 }
4433
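/* Work out how many fence registers this platform provides (8, 16 or 32,
 * or whatever the host exposes when running as a vGPU guest), reset them
 * to a known state and detect the bit-6 swizzling used for tiled surfaces.
 */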
4434 void
4435 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4436 {
4437         struct drm_device *dev = &dev_priv->drm;
4438
4439         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4440             !IS_CHERRYVIEW(dev_priv))
4441                 dev_priv->num_fence_regs = 32;
4442         else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4443                  IS_I945GM(dev_priv) || IS_G33(dev_priv))
4444                 dev_priv->num_fence_regs = 16;
4445         else
4446                 dev_priv->num_fence_regs = 8;
4447
4448         if (intel_vgpu_active(dev_priv))
4449                 dev_priv->num_fence_regs =
4450                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4451
4452         /* Initialize fence registers to zero */
4453         i915_gem_restore_fences(dev);
4454
4455         i915_gem_detect_bit_6_swizzle(dev);
4456 }
4457
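/* One-time GEM setup at driver load: create the slab caches for objects,
 * VMAs and requests, and initialise the lists, delayed work handlers and
 * wait queues they feed.
 */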
4458 void
4459 i915_gem_load_init(struct drm_device *dev)
4460 {
4461         struct drm_i915_private *dev_priv = to_i915(dev);
4462         int i;
4463
4464         dev_priv->objects =
4465                 kmem_cache_create("i915_gem_object",
4466                                   sizeof(struct drm_i915_gem_object), 0,
4467                                   SLAB_HWCACHE_ALIGN,
4468                                   NULL);
4469         dev_priv->vmas =
4470                 kmem_cache_create("i915_gem_vma",
4471                                   sizeof(struct i915_vma), 0,
4472                                   SLAB_HWCACHE_ALIGN,
4473                                   NULL);
4474         dev_priv->requests =
4475                 kmem_cache_create("i915_gem_request",
4476                                   sizeof(struct drm_i915_gem_request), 0,
4477                                   SLAB_HWCACHE_ALIGN,
4478                                   NULL);
4479
4480         INIT_LIST_HEAD(&dev_priv->context_list);
4481         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4482         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4483         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4484         for (i = 0; i < I915_NUM_ENGINES; i++)
4485                 init_engine_lists(&dev_priv->engine[i]);
4486         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4487                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4488         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4489                           i915_gem_retire_work_handler);
4490         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4491                           i915_gem_idle_work_handler);
4492         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4493         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4494
4495         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4496
4499         init_waitqueue_head(&dev_priv->pending_flip_queue);
4500
4501         dev_priv->mm.interruptible = true;
4502
4503         mutex_init(&dev_priv->fb_tracking.lock);
4504 }
4505
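/* Counterpart to i915_gem_load_init(): destroy the GEM slab caches. */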
4506 void i915_gem_load_cleanup(struct drm_device *dev)
4507 {
4508         struct drm_i915_private *dev_priv = to_i915(dev);
4509
4510         kmem_cache_destroy(dev_priv->requests);
4511         kmem_cache_destroy(dev_priv->vmas);
4512         kmem_cache_destroy(dev_priv->objects);
4513 }
4514
4515 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4516 {
4517         struct drm_i915_gem_object *obj;
4518
4519         /* Called just before we write the hibernation image.
4520          *
4521          * We need to update the domain tracking to reflect that the CPU
4522          * will be accessing all the pages to create the hibernation image
4523          * and to restore from it, and so upon restoration those pages
4524          * will be in the CPU domain.
4525          *
4526          * To make sure the hibernation image contains the latest state,
4527          * we update that state just before writing out the image.
4528          */
4529
4530         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4531                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4532                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4533         }
4534
4535         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4536                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4537                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4538         }
4539
4540         return 0;
4541 }
4542
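/* Called when a client's drm file is closed: unlink any requests still
 * pointing at the dying file_priv and remove it from the RPS (boost)
 * client list.
 */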
4543 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4544 {
4545         struct drm_i915_file_private *file_priv = file->driver_priv;
4546         struct drm_i915_gem_request *request;
4547
4548         /* Clean up our request list when the client is going away, so that
4549          * later retire_requests won't dereference our soon-to-be-gone
4550          * file_priv.
4551          */
4552         spin_lock(&file_priv->mm.lock);
4553         list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4554                 request->file_priv = NULL;
4555         spin_unlock(&file_priv->mm.lock);
4556
4557         if (!list_empty(&file_priv->rps.link)) {
4558                 spin_lock(&to_i915(dev)->rps.client_lock);
4559                 list_del(&file_priv->rps.link);
4560                 spin_unlock(&to_i915(dev)->rps.client_lock);
4561         }
4562 }
4563
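/* Called for each newly opened drm file: allocate per-client GEM state
 * (request list, RPS link, BSD engine selection) and open the client's
 * default context.
 */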
4564 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4565 {
4566         struct drm_i915_file_private *file_priv;
4567         int ret;
4568
4569         DRM_DEBUG_DRIVER("\n");
4570
4571         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4572         if (!file_priv)
4573                 return -ENOMEM;
4574
4575         file->driver_priv = file_priv;
4576         file_priv->dev_priv = to_i915(dev);
4577         file_priv->file = file;
4578         INIT_LIST_HEAD(&file_priv->rps.link);
4579
4580         spin_lock_init(&file_priv->mm.lock);
4581         INIT_LIST_HEAD(&file_priv->mm.request_list);
4582
4583         file_priv->bsd_engine = -1;
4584
4585         ret = i915_gem_context_open(dev, file);
4586         if (ret)
4587                 kfree(file_priv);
4588
4589         return ret;
4590 }
4591
4592 /**
4593  * i915_gem_track_fb - update frontbuffer tracking
4594  * @old: current GEM buffer for the frontbuffer slots
4595  * @new: new GEM buffer for the frontbuffer slots
4596  * @frontbuffer_bits: bitmask of frontbuffer slots
4597  *
4598  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4599  * from @old and setting them in @new. Both @old and @new can be NULL.
4600  */
4601 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4602                        struct drm_i915_gem_object *new,
4603                        unsigned frontbuffer_bits)
4604 {
4605         if (old) {
4606                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
4607                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
4608                 old->frontbuffer_bits &= ~frontbuffer_bits;
4609         }
4610
4611         if (new) {
4612                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
4613                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
4614                 new->frontbuffer_bits |= frontbuffer_bits;
4615         }
4616 }
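
/*
 * Illustrative use from a page-flip path (a hedged sketch; the variable
 * names below are hypothetical, not taken from this file):
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj, plane->frontbuffer_bit);
 *
 * after which frontbuffer invalidate/flush events are accounted against
 * the new object rather than the old one.
 */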
4617
4618 /* Helpers for looking up an object's binding in a given VM (GGTT or ppGTT) */
4619 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
4620                         struct i915_address_space *vm)
4621 {
4622         struct drm_i915_private *dev_priv = to_i915(o->base.dev);
4623         struct i915_vma *vma;
4624
4625         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
4626
4627         list_for_each_entry(vma, &o->vma_list, obj_link) {
4628                 if (vma->is_ggtt &&
4629                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4630                         continue;
4631                 if (vma->vm == vm)
4632                         return vma->node.start;
4633         }
4634
4635         WARN(1, "%s vma for this object not found.\n",
4636              i915_is_ggtt(vm) ? "global" : "ppgtt");
4637         return -1;
4638 }
4639
4640 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
4641                                   const struct i915_ggtt_view *view)
4642 {
4643         struct i915_vma *vma;
4644
4645         list_for_each_entry(vma, &o->vma_list, obj_link)
4646                 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4647                         return vma->node.start;
4648
4649         WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
4650         return -1;
4651 }
4652
4653 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4654                         struct i915_address_space *vm)
4655 {
4656         struct i915_vma *vma;
4657
4658         list_for_each_entry(vma, &o->vma_list, obj_link) {
4659                 if (vma->is_ggtt &&
4660                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4661                         continue;
4662                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4663                         return true;
4664         }
4665
4666         return false;
4667 }
4668
4669 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
4670                                   const struct i915_ggtt_view *view)
4671 {
4672         struct i915_vma *vma;
4673
4674         list_for_each_entry(vma, &o->vma_list, obj_link)
4675                 if (vma->is_ggtt &&
4676                     i915_ggtt_view_equal(&vma->ggtt_view, view) &&
4677                     drm_mm_node_allocated(&vma->node))
4678                         return true;
4679
4680         return false;
4681 }
4682
4683 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
4684 {
4685         struct i915_vma *vma;
4686
4687         GEM_BUG_ON(list_empty(&o->vma_list));
4688
4689         list_for_each_entry(vma, &o->vma_list, obj_link) {
4690                 if (vma->is_ggtt &&
4691                     vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
4692                         return vma->node.size;
4693         }
4694
4695         return 0;
4696 }
4697
4698 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
4699 {
4700         struct i915_vma *vma;
4701         list_for_each_entry(vma, &obj->vma_list, obj_link)
4702                 if (vma->pin_count > 0)
4703                         return true;
4704
4705         return false;
4706 }
4707
4708 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4709 struct page *
4710 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4711 {
4712         struct page *page;
4713
4714         /* Only objects backed by struct pages have per-page dirty tracking */
4715         if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
4716                 return NULL;
4717
4718         page = i915_gem_object_get_page(obj, n);
4719         set_page_dirty(page);
4720         return page;
4721 }
4722
4723 /* Allocate a new GEM object and fill it with the supplied data */
4724 struct drm_i915_gem_object *
4725 i915_gem_object_create_from_data(struct drm_device *dev,
4726                                  const void *data, size_t size)
4727 {
4728         struct drm_i915_gem_object *obj;
4729         struct sg_table *sg;
4730         size_t bytes;
4731         int ret;
4732
4733         obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4734         if (IS_ERR(obj))
4735                 return obj;
4736
4737         ret = i915_gem_object_set_to_cpu_domain(obj, true);
4738         if (ret)
4739                 goto fail;
4740
4741         ret = i915_gem_object_get_pages(obj);
4742         if (ret)
4743                 goto fail;
4744
4745         i915_gem_object_pin_pages(obj);
4746         sg = obj->pages;
4747         bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4748         obj->dirty = 1;         /* Backing store is now out of date */
4749         i915_gem_object_unpin_pages(obj);
4750
4751         if (WARN_ON(bytes != size)) {
4752                 DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4753                 ret = -EFAULT;
4754                 goto fail;
4755         }
4756
4757         return obj;
4758
4759 fail:
4760         i915_gem_object_put(obj);
4761         return ERR_PTR(ret);
4762 }
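
/*
 * Hedged usage sketch for i915_gem_object_create_from_data(): firmware
 * loaders are the typical users, copying a blob into a fresh GEM object.
 * The names below are illustrative only:
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */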