[cascardo/linux.git] drivers/gpu/drm/i915/i915_gem_execbuffer.c @ d815ef51a5eac9fb91587b73d0bb63b32f026c9b
1 /*
2  * Copyright © 2008,2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Chris Wilson <chris@chris-wilson.co.uk>
26  *
27  */
28
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/dma_remapping.h>
35
36 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
37 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
38 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
39
40 #define BATCH_OFFSET_BIAS (256*1024)
41
42 struct eb_vmas {
43         struct list_head vmas;
44         int and;
45         union {
46                 struct i915_vma *lut[0];
47                 struct hlist_head buckets[0];
48         };
49 };
50
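/*
 * Allocate the eb_vmas structure used to track the objects referenced by an
 * execbuffer call.  When userspace passes I915_EXEC_HANDLE_LUT the handles
 * are simple indices, so a flat lookup table is tried first; otherwise (or if
 * that allocation fails) a hash table is used instead, starting at
 * PAGE_SIZE / sizeof(struct hlist_head) / 2 buckets (e.g. 256 buckets with
 * 4KiB pages on 64-bit) and halved until it is at most twice the buffer count.
 */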
51 static struct eb_vmas *
52 eb_create(struct drm_i915_gem_execbuffer2 *args)
53 {
54         struct eb_vmas *eb = NULL;
55
56         if (args->flags & I915_EXEC_HANDLE_LUT) {
57                 unsigned size = args->buffer_count;
58                 size *= sizeof(struct i915_vma *);
59                 size += sizeof(struct eb_vmas);
60                 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
61         }
62
63         if (eb == NULL) {
64                 unsigned size = args->buffer_count;
65                 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
66                 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
67                 while (count > 2*size)
68                         count >>= 1;
69                 eb = kzalloc(count*sizeof(struct hlist_head) +
70                              sizeof(struct eb_vmas),
71                              GFP_TEMPORARY);
72                 if (eb == NULL)
73                         return eb;
74
75                 eb->and = count - 1;
76         } else
77                 eb->and = -args->buffer_count;
78
79         INIT_LIST_HEAD(&eb->vmas);
80         return eb;
81 }
82
83 static void
84 eb_reset(struct eb_vmas *eb)
85 {
86         if (eb->and >= 0)
87                 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
88 }
89
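/*
 * Resolve every handle in the exec list to a GEM object under the file's
 * table_lock, taking a reference on each, then (with the spinlock dropped)
 * look up or create a VMA for the object in the target address space and
 * move it onto the eb->vmas list, made findable either by index in the flat
 * table or by handle in the hash buckets.  On error, objects still on the
 * temporary list are unreferenced here; vmas already transferred are cleaned
 * up by eb_destroy().
 */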
90 static int
91 eb_lookup_vmas(struct eb_vmas *eb,
92                struct drm_i915_gem_exec_object2 *exec,
93                const struct drm_i915_gem_execbuffer2 *args,
94                struct i915_address_space *vm,
95                struct drm_file *file)
96 {
97         struct drm_i915_private *dev_priv = vm->dev->dev_private;
98         struct drm_i915_gem_object *obj;
99         struct list_head objects;
100         int i, ret;
101
102         INIT_LIST_HEAD(&objects);
103         spin_lock(&file->table_lock);
104         /* Grab a reference to the object and release the lock so we can look up
105          * or create the VMA without using GFP_ATOMIC. */
106         for (i = 0; i < args->buffer_count; i++) {
107                 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
108                 if (obj == NULL) {
109                         spin_unlock(&file->table_lock);
110                         DRM_DEBUG("Invalid object handle %d at index %d\n",
111                                    exec[i].handle, i);
112                         ret = -ENOENT;
113                         goto err;
114                 }
115
116                 if (!list_empty(&obj->obj_exec_link)) {
117                         spin_unlock(&file->table_lock);
118                         DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
119                                    obj, exec[i].handle, i);
120                         ret = -EINVAL;
121                         goto err;
122                 }
123
124                 drm_gem_object_reference(&obj->base);
125                 list_add_tail(&obj->obj_exec_link, &objects);
126         }
127         spin_unlock(&file->table_lock);
128
129         i = 0;
130         while (!list_empty(&objects)) {
131                 struct i915_vma *vma;
132                 struct i915_address_space *bind_vm = vm;
133
134                 if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
135                     USES_FULL_PPGTT(vm->dev)) {
136                         ret = -EINVAL;
137                         goto err;
138                 }
139
140                 /* If we have secure dispatch, the batch buffer (the last
141                  * object in the execbuffer list) must use the GGTT VM.
142                  */
143                 if (((args->flags & I915_EXEC_SECURE) &&
144                     (i == (args->buffer_count - 1))))
145                         bind_vm = &dev_priv->gtt.base;
146
147                 obj = list_first_entry(&objects,
148                                        struct drm_i915_gem_object,
149                                        obj_exec_link);
150
151                 /*
152                  * NOTE: We can leak any vmas created here when something fails
153                  * later on. But that's no issue since vma_unbind can deal with
154                  * vmas which are not actually bound. And since only
155                  * lookup_or_create exists as an interface to get at the vma
156                  * from the (obj, vm) we don't run the risk of creating
157                  * duplicated vmas for the same vm.
158                  */
159                 vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
160                 if (IS_ERR(vma)) {
161                         DRM_DEBUG("Failed to lookup VMA\n");
162                         ret = PTR_ERR(vma);
163                         goto err;
164                 }
165
166                 /* Transfer ownership from the objects list to the vmas list. */
167                 list_add_tail(&vma->exec_list, &eb->vmas);
168                 list_del_init(&obj->obj_exec_link);
169
170                 vma->exec_entry = &exec[i];
171                 if (eb->and < 0) {
172                         eb->lut[i] = vma;
173                 } else {
174                         uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
175                         vma->exec_handle = handle;
176                         hlist_add_head(&vma->exec_node,
177                                        &eb->buckets[handle & eb->and]);
178                 }
179                 ++i;
180         }
181
182         return 0;
183
184
185 err:
186         while (!list_empty(&objects)) {
187                 obj = list_first_entry(&objects,
188                                        struct drm_i915_gem_object,
189                                        obj_exec_link);
190                 list_del_init(&obj->obj_exec_link);
191                 drm_gem_object_unreference(&obj->base);
192         }
193         /*
194          * Objects already transferred to the vmas list will be unreferenced by
195          * eb_destroy.
196          */
197
198         return ret;
199 }
200
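/* Map an execbuffer handle back to its vma, either by direct index into the
 * flat lookup table (eb->and < 0) or by searching the hash bucket it was
 * added to in eb_lookup_vmas().
 */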
201 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
202 {
203         if (eb->and < 0) {
204                 if (handle >= -eb->and)
205                         return NULL;
206                 return eb->lut[handle];
207         } else {
208                 struct hlist_head *head;
209                 struct hlist_node *node;
210
211                 head = &eb->buckets[handle & eb->and];
212                 hlist_for_each(node, head) {
213                         struct i915_vma *vma;
214
215                         vma = hlist_entry(node, struct i915_vma, exec_node);
216                         if (vma->exec_handle == handle)
217                                 return vma;
218                 }
219                 return NULL;
220         }
221 }
222
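/* Drop the pin (and fence reservation, if any) taken while reserving a vma
 * for execution, and clear the corresponding bookkeeping flags.
 */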
223 static void
224 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
225 {
226         struct drm_i915_gem_exec_object2 *entry;
227         struct drm_i915_gem_object *obj = vma->obj;
228
229         if (!drm_mm_node_allocated(&vma->node))
230                 return;
231
232         entry = vma->exec_entry;
233
234         if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
235                 i915_gem_object_unpin_fence(obj);
236
237         if (entry->flags & __EXEC_OBJECT_HAS_PIN)
238                 vma->pin_count--;
239
240         entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
241 }
242
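/* Unreserve every vma still on the eb list, drop the object references taken
 * in eb_lookup_vmas() and free the tracking structure.
 */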
243 static void eb_destroy(struct eb_vmas *eb)
244 {
245         while (!list_empty(&eb->vmas)) {
246                 struct i915_vma *vma;
247
248                 vma = list_first_entry(&eb->vmas,
249                                        struct i915_vma,
250                                        exec_list);
251                 list_del_init(&vma->exec_list);
252                 i915_gem_execbuffer_unreserve_vma(vma);
253                 drm_gem_object_unreference(&vma->obj->base);
254         }
255         kfree(eb);
256 }
257
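/* Decide whether a relocation should be written through a CPU kmapping
 * rather than through the GTT aperture: preferred on LLC platforms or when
 * the object is CPU-cached, already in the CPU write domain, or simply not
 * mappable through the aperture.
 */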
258 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
259 {
260         return (HAS_LLC(obj->base.dev) ||
261                 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
262                 !obj->map_and_fenceable ||
263                 obj->cache_level != I915_CACHE_NONE);
264 }
265
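/* Apply a relocation by kmapping the page containing it and writing the
 * value directly.  On gen8+ the address is 64 bits wide, so the upper dword
 * is written as well, remapping if the entry straddles a page boundary.
 */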
266 static int
267 relocate_entry_cpu(struct drm_i915_gem_object *obj,
268                    struct drm_i915_gem_relocation_entry *reloc,
269                    uint64_t target_offset)
270 {
271         struct drm_device *dev = obj->base.dev;
272         uint32_t page_offset = offset_in_page(reloc->offset);
273         uint64_t delta = reloc->delta + target_offset;
274         char *vaddr;
275         int ret;
276
277         ret = i915_gem_object_set_to_cpu_domain(obj, true);
278         if (ret)
279                 return ret;
280
281         vaddr = kmap_atomic(i915_gem_object_get_page(obj,
282                                 reloc->offset >> PAGE_SHIFT));
283         *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
284
285         if (INTEL_INFO(dev)->gen >= 8) {
286                 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
287
288                 if (page_offset == 0) {
289                         kunmap_atomic(vaddr);
290                         vaddr = kmap_atomic(i915_gem_object_get_page(obj,
291                             (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
292                 }
293
294                 *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
295         }
296
297         kunmap_atomic(vaddr);
298
299         return 0;
300 }
301
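/* Apply a relocation through an atomic write-combining mapping of the GGTT
 * aperture.  The object is moved to the GTT domain and its fence released
 * first; gen8+ again needs a second dword for the upper 32 bits.
 */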
302 static int
303 relocate_entry_gtt(struct drm_i915_gem_object *obj,
304                    struct drm_i915_gem_relocation_entry *reloc,
305                    uint64_t target_offset)
306 {
307         struct drm_device *dev = obj->base.dev;
308         struct drm_i915_private *dev_priv = dev->dev_private;
309         uint64_t delta = reloc->delta + target_offset;
310         uint32_t __iomem *reloc_entry;
311         void __iomem *reloc_page;
312         int ret;
313
314         ret = i915_gem_object_set_to_gtt_domain(obj, true);
315         if (ret)
316                 return ret;
317
318         ret = i915_gem_object_put_fence(obj);
319         if (ret)
320                 return ret;
321
322         /* Map the page containing the relocation we're going to perform.  */
323         reloc->offset += i915_gem_obj_ggtt_offset(obj);
324         reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
325                         reloc->offset & PAGE_MASK);
326         reloc_entry = (uint32_t __iomem *)
327                 (reloc_page + offset_in_page(reloc->offset));
328         iowrite32(lower_32_bits(delta), reloc_entry);
329
330         if (INTEL_INFO(dev)->gen >= 8) {
331                 reloc_entry += 1;
332
333                 if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
334                         io_mapping_unmap_atomic(reloc_page);
335                         reloc_page = io_mapping_map_atomic_wc(
336                                         dev_priv->gtt.mappable,
337                                         reloc->offset + sizeof(uint32_t));
338                         reloc_entry = reloc_page;
339                 }
340
341                 iowrite32(upper_32_bits(delta), reloc_entry);
342         }
343
344         io_mapping_unmap_atomic(reloc_page);
345
346         return 0;
347 }
348
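/* Validate and apply a single relocation: look up the target vma, sanity
 * check the requested domains and the relocation offset, and rewrite the
 * entry unless the presumed offset already matches the target's location.
 */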
349 static int
350 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
351                                    struct eb_vmas *eb,
352                                    struct drm_i915_gem_relocation_entry *reloc)
353 {
354         struct drm_device *dev = obj->base.dev;
355         struct drm_gem_object *target_obj;
356         struct drm_i915_gem_object *target_i915_obj;
357         struct i915_vma *target_vma;
358         uint64_t target_offset;
359         int ret;
360
361         /* We already hold a reference to all valid objects. */
362         target_vma = eb_get_vma(eb, reloc->target_handle);
363         if (unlikely(target_vma == NULL))
364                 return -ENOENT;
365         target_i915_obj = target_vma->obj;
366         target_obj = &target_vma->obj->base;
367
368         target_offset = target_vma->node.start;
369
370         /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
371          * pipe_control writes because the gpu doesn't properly redirect them
372          * through the ppgtt for non-secure batchbuffers. */
373         if (unlikely(IS_GEN6(dev) &&
374             reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
375             !target_i915_obj->has_global_gtt_mapping)) {
376                 struct i915_vma *vma =
377                         list_first_entry(&target_i915_obj->vma_list,
378                                          typeof(*vma), vma_link);
379                 vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
380         }
381
382         /* Validate that the target is in a valid r/w GPU domain */
383         if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
384                 DRM_DEBUG("reloc with multiple write domains: "
385                           "obj %p target %d offset %d "
386                           "read %08x write %08x",
387                           obj, reloc->target_handle,
388                           (int) reloc->offset,
389                           reloc->read_domains,
390                           reloc->write_domain);
391                 return -EINVAL;
392         }
393         if (unlikely((reloc->write_domain | reloc->read_domains)
394                      & ~I915_GEM_GPU_DOMAINS)) {
395                 DRM_DEBUG("reloc with read/write non-GPU domains: "
396                           "obj %p target %d offset %d "
397                           "read %08x write %08x",
398                           obj, reloc->target_handle,
399                           (int) reloc->offset,
400                           reloc->read_domains,
401                           reloc->write_domain);
402                 return -EINVAL;
403         }
404
405         target_obj->pending_read_domains |= reloc->read_domains;
406         target_obj->pending_write_domain |= reloc->write_domain;
407
408         /* If the relocation already has the right value in it, no
409          * more work needs to be done.
410          */
411         if (target_offset == reloc->presumed_offset)
412                 return 0;
413
414         /* Check that the relocation address is valid... */
415         if (unlikely(reloc->offset >
416                 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
417                 DRM_DEBUG("Relocation beyond object bounds: "
418                           "obj %p target %d offset %d size %d.\n",
419                           obj, reloc->target_handle,
420                           (int) reloc->offset,
421                           (int) obj->base.size);
422                 return -EINVAL;
423         }
424         if (unlikely(reloc->offset & 3)) {
425                 DRM_DEBUG("Relocation not 4-byte aligned: "
426                           "obj %p target %d offset %d.\n",
427                           obj, reloc->target_handle,
428                           (int) reloc->offset);
429                 return -EINVAL;
430         }
431
432         /* We can't wait for rendering with pagefaults disabled */
433         if (obj->active && in_atomic())
434                 return -EFAULT;
435
436         if (use_cpu_reloc(obj))
437                 ret = relocate_entry_cpu(obj, reloc, target_offset);
438         else
439                 ret = relocate_entry_gtt(obj, reloc, target_offset);
440
441         if (ret)
442                 return ret;
443
444         /* and update the user's relocation entry */
445         reloc->presumed_offset = target_offset;
446
447         return 0;
448 }
449
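/* Fast relocation path: copy the user's relocation entries onto the stack in
 * batches (non-faulting, as pagefaults are disabled by the caller), apply
 * each one and write back any presumed_offset that changed.
 */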
450 static int
451 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
452                                  struct eb_vmas *eb)
453 {
454 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
455         struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
456         struct drm_i915_gem_relocation_entry __user *user_relocs;
457         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
458         int remain, ret;
459
460         user_relocs = to_user_ptr(entry->relocs_ptr);
461
462         remain = entry->relocation_count;
463         while (remain) {
464                 struct drm_i915_gem_relocation_entry *r = stack_reloc;
465                 int count = remain;
466                 if (count > ARRAY_SIZE(stack_reloc))
467                         count = ARRAY_SIZE(stack_reloc);
468                 remain -= count;
469
470                 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
471                         return -EFAULT;
472
473                 do {
474                         u64 offset = r->presumed_offset;
475
476                         ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
477                         if (ret)
478                                 return ret;
479
480                         if (r->presumed_offset != offset &&
481                             __copy_to_user_inatomic(&user_relocs->presumed_offset,
482                                                     &r->presumed_offset,
483                                                     sizeof(r->presumed_offset))) {
484                                 return -EFAULT;
485                         }
486
487                         user_relocs++;
488                         r++;
489                 } while (--count);
490         }
491
492         return 0;
493 #undef N_RELOC
494 }
495
496 static int
497 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
498                                       struct eb_vmas *eb,
499                                       struct drm_i915_gem_relocation_entry *relocs)
500 {
501         const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
502         int i, ret;
503
504         for (i = 0; i < entry->relocation_count; i++) {
505                 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
506                 if (ret)
507                         return ret;
508         }
509
510         return 0;
511 }
512
513 static int
514 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
515 {
516         struct i915_vma *vma;
517         int ret = 0;
518
519         /* This is the fast path and we cannot handle a pagefault whilst
520          * holding the struct mutex lest the user pass in the relocations
521          * contained within a mmapped bo. In such a case the page fault
522          * handler would call i915_gem_fault() and we would try to acquire
523          * the struct mutex again. Obviously this is bad and so lockdep
524          * complains vehemently.
525          */
526         pagefault_disable();
527         list_for_each_entry(vma, &eb->vmas, exec_list) {
528                 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
529                 if (ret)
530                         break;
531         }
532         pagefault_enable();
533
534         return ret;
535 }
536
537 static int
538 need_reloc_mappable(struct i915_vma *vma)
539 {
540         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
541         return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
542                 i915_is_ggtt(vma->vm);
543 }
544
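/* Pin a vma for execution, requesting a mappable or global binding and an
 * offset bias as dictated by the exec entry flags, grabbing a fence where
 * needed, and noting whether relocations must be rerun because the object
 * did not end up at its previously reported offset.
 */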
545 static int
546 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
547                                 struct intel_engine_cs *ring,
548                                 bool *need_reloc)
549 {
550         struct drm_i915_gem_object *obj = vma->obj;
551         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
552         bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
553         bool need_fence;
554         uint64_t flags;
555         int ret;
556
557         flags = 0;
558
559         need_fence =
560                 has_fenced_gpu_access &&
561                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
562                 obj->tiling_mode != I915_TILING_NONE;
563         if (need_fence || need_reloc_mappable(vma))
564                 flags |= PIN_MAPPABLE;
565
566         if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
567                 flags |= PIN_GLOBAL;
568         if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
569                 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
570
571         ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
572         if (ret)
573                 return ret;
574
575         entry->flags |= __EXEC_OBJECT_HAS_PIN;
576
577         if (has_fenced_gpu_access) {
578                 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
579                         ret = i915_gem_object_get_fence(obj);
580                         if (ret)
581                                 return ret;
582
583                         if (i915_gem_object_pin_fence(obj))
584                                 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
585
586                         obj->pending_fenced_gpu_access = true;
587                 }
588         }
589
590         if (entry->offset != vma->node.start) {
591                 entry->offset = vma->node.start;
592                 *need_reloc = true;
593         }
594
595         if (entry->flags & EXEC_OBJECT_WRITE) {
596                 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
597                 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
598         }
599
600         return 0;
601 }
602
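/* Check whether an already bound vma violates its placement constraints
 * (alignment, mappability for fencing, or the batch offset bias) and so must
 * be unbound and rebound during reservation.
 */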
603 static bool
604 eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
605 {
606         struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
607         struct drm_i915_gem_object *obj = vma->obj;
608         bool need_fence, need_mappable;
609
610         need_fence =
611                 has_fenced_gpu_access &&
612                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
613                 obj->tiling_mode != I915_TILING_NONE;
614         need_mappable = need_fence || need_reloc_mappable(vma);
615
616         WARN_ON((need_mappable || need_fence) &&
617                !i915_is_ggtt(vma->vm));
618
619         if (entry->alignment &&
620             vma->node.start & (entry->alignment - 1))
621                 return true;
622
623         if (need_mappable && !obj->map_and_fenceable)
624                 return true;
625
626         if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
627             vma->node.start < BATCH_OFFSET_BIAS)
628                 return true;
629
630         return false;
631 }
632
633 static int
634 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
635                             struct list_head *vmas,
636                             bool *need_relocs)
637 {
638         struct drm_i915_gem_object *obj;
639         struct i915_vma *vma;
640         struct i915_address_space *vm;
641         struct list_head ordered_vmas;
642         bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
643         int retry;
644
645         if (list_empty(vmas))
646                 return 0;
647
648         i915_gem_retire_requests_ring(ring);
649
650         vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
651
652         INIT_LIST_HEAD(&ordered_vmas);
653         while (!list_empty(vmas)) {
654                 struct drm_i915_gem_exec_object2 *entry;
655                 bool need_fence, need_mappable;
656
657                 vma = list_first_entry(vmas, struct i915_vma, exec_list);
658                 obj = vma->obj;
659                 entry = vma->exec_entry;
660
661                 need_fence =
662                         has_fenced_gpu_access &&
663                         entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
664                         obj->tiling_mode != I915_TILING_NONE;
665                 need_mappable = need_fence || need_reloc_mappable(vma);
666
667                 if (need_mappable)
668                         list_move(&vma->exec_list, &ordered_vmas);
669                 else
670                         list_move_tail(&vma->exec_list, &ordered_vmas);
671
672                 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
673                 obj->base.pending_write_domain = 0;
674                 obj->pending_fenced_gpu_access = false;
675         }
676         list_splice(&ordered_vmas, vmas);
677
678         /* Attempt to pin all of the buffers into the GTT.
679          * This is done in 3 phases:
680          *
681          * 1a. Unbind all objects that do not match the GTT constraints for
682          *     the execbuffer (fenceable, mappable, alignment etc).
683          * 1b. Increment pin count for already bound objects.
684          * 2.  Bind new objects.
685          * 3.  Decrement pin count.
686          *
687          * This avoids unnecessary unbinding of later objects in order to make
688          * room for the earlier objects *unless* we need to defragment.
689          */
690         retry = 0;
691         do {
692                 int ret = 0;
693
694                 /* Unbind any ill-fitting objects or pin. */
695                 list_for_each_entry(vma, vmas, exec_list) {
696                         if (!drm_mm_node_allocated(&vma->node))
697                                 continue;
698
699                         if (eb_vma_misplaced(vma, has_fenced_gpu_access))
700                                 ret = i915_vma_unbind(vma);
701                         else
702                                 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
703                         if (ret)
704                                 goto err;
705                 }
706
707                 /* Bind fresh objects */
708                 list_for_each_entry(vma, vmas, exec_list) {
709                         if (drm_mm_node_allocated(&vma->node))
710                                 continue;
711
712                         ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
713                         if (ret)
714                                 goto err;
715                 }
716
717 err:
718                 if (ret != -ENOSPC || retry++)
719                         return ret;
720
721                 /* Decrement pin count for bound objects */
722                 list_for_each_entry(vma, vmas, exec_list)
723                         i915_gem_execbuffer_unreserve_vma(vma);
724
725                 ret = i915_gem_evict_vm(vm, true);
726                 if (ret)
727                         return ret;
728         } while (1);
729 }
730
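/* Slow relocation path, entered when the atomic fast path faults: release
 * the buffers and struct_mutex, copy all relocation entries from userspace
 * with pagefaults enabled (marking the stored presumed offsets invalid),
 * then retake the lock, re-lookup and re-reserve the objects and apply the
 * relocations from the kernel copy.
 */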
731 static int
732 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
733                                   struct drm_i915_gem_execbuffer2 *args,
734                                   struct drm_file *file,
735                                   struct intel_engine_cs *ring,
736                                   struct eb_vmas *eb,
737                                   struct drm_i915_gem_exec_object2 *exec)
738 {
739         struct drm_i915_gem_relocation_entry *reloc;
740         struct i915_address_space *vm;
741         struct i915_vma *vma;
742         bool need_relocs;
743         int *reloc_offset;
744         int i, total, ret;
745         unsigned count = args->buffer_count;
746
747         if (WARN_ON(list_empty(&eb->vmas)))
748                 return 0;
749
750         vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
751
752         /* We may process another execbuffer during the unlock... */
753         while (!list_empty(&eb->vmas)) {
754                 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
755                 list_del_init(&vma->exec_list);
756                 i915_gem_execbuffer_unreserve_vma(vma);
757                 drm_gem_object_unreference(&vma->obj->base);
758         }
759
760         mutex_unlock(&dev->struct_mutex);
761
762         total = 0;
763         for (i = 0; i < count; i++)
764                 total += exec[i].relocation_count;
765
766         reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
767         reloc = drm_malloc_ab(total, sizeof(*reloc));
768         if (reloc == NULL || reloc_offset == NULL) {
769                 drm_free_large(reloc);
770                 drm_free_large(reloc_offset);
771                 mutex_lock(&dev->struct_mutex);
772                 return -ENOMEM;
773         }
774
775         total = 0;
776         for (i = 0; i < count; i++) {
777                 struct drm_i915_gem_relocation_entry __user *user_relocs;
778                 u64 invalid_offset = (u64)-1;
779                 int j;
780
781                 user_relocs = to_user_ptr(exec[i].relocs_ptr);
782
783                 if (copy_from_user(reloc+total, user_relocs,
784                                    exec[i].relocation_count * sizeof(*reloc))) {
785                         ret = -EFAULT;
786                         mutex_lock(&dev->struct_mutex);
787                         goto err;
788                 }
789
790                 /* As we do not update the known relocation offsets after
791                  * relocating (due to the complexities in lock handling),
792                  * we need to mark them as invalid now so that we force the
793                  * relocation processing next time. Just in case the target
794                  * object is evicted and then rebound into its old
795                  * presumed_offset before the next execbuffer - if that
796                  * happened we would make the mistake of assuming that the
797                  * relocations were valid.
798                  */
799                 for (j = 0; j < exec[i].relocation_count; j++) {
800                         if (__copy_to_user(&user_relocs[j].presumed_offset,
801                                            &invalid_offset,
802                                            sizeof(invalid_offset))) {
803                                 ret = -EFAULT;
804                                 mutex_lock(&dev->struct_mutex);
805                                 goto err;
806                         }
807                 }
808
809                 reloc_offset[i] = total;
810                 total += exec[i].relocation_count;
811         }
812
813         ret = i915_mutex_lock_interruptible(dev);
814         if (ret) {
815                 mutex_lock(&dev->struct_mutex);
816                 goto err;
817         }
818
819         /* reacquire the objects */
820         eb_reset(eb);
821         ret = eb_lookup_vmas(eb, exec, args, vm, file);
822         if (ret)
823                 goto err;
824
825         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
826         ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
827         if (ret)
828                 goto err;
829
830         list_for_each_entry(vma, &eb->vmas, exec_list) {
831                 int offset = vma->exec_entry - exec;
832                 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
833                                                             reloc + reloc_offset[offset]);
834                 if (ret)
835                         goto err;
836         }
837
838         /* Leave the user relocations as they are; this is the painfully slow path,
839          * and we want to avoid the complication of dropping the lock whilst
840          * having buffers reserved in the aperture and so causing spurious
841          * ENOSPC for random operations.
842          */
843
844 err:
845         drm_free_large(reloc);
846         drm_free_large(reloc_offset);
847         return ret;
848 }
849
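/* Prepare the objects for execution: synchronise against rendering on other
 * rings, clflush anything still dirty in the CPU domain, flush the chipset
 * and GTT write buffers, and invalidate the ring's caches.
 */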
850 static int
851 i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
852                                 struct list_head *vmas)
853 {
854         struct i915_vma *vma;
855         uint32_t flush_domains = 0;
856         bool flush_chipset = false;
857         int ret;
858
859         list_for_each_entry(vma, vmas, exec_list) {
860                 struct drm_i915_gem_object *obj = vma->obj;
861                 ret = i915_gem_object_sync(obj, ring);
862                 if (ret)
863                         return ret;
864
865                 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
866                         flush_chipset |= i915_gem_clflush_object(obj, false);
867
868                 flush_domains |= obj->base.write_domain;
869         }
870
871         if (flush_chipset)
872                 i915_gem_chipset_flush(ring->dev);
873
874         if (flush_domains & I915_GEM_DOMAIN_GTT)
875                 wmb();
876
877         /* Unconditionally invalidate gpu caches and ensure that we do flush
878          * any residual writes from the previous batch.
879          */
880         return intel_ring_invalidate_all_caches(ring);
881 }
882
883 static bool
884 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
885 {
886         if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
887                 return false;
888
889         return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
890 }
891
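/* Sanity check the exec list supplied by userspace: reject unknown per-object
 * flags, guard against the total relocation count overflowing the allocation
 * made by the slow path, and verify (and optionally prefault) the relocation
 * arrays for write access.
 */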
892 static int
893 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
894                    int count)
895 {
896         int i;
897         unsigned relocs_total = 0;
898         unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
899
900         for (i = 0; i < count; i++) {
901                 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
902                 int length; /* limited by fault_in_pages_readable() */
903
904                 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
905                         return -EINVAL;
906
907                 /* First check for malicious input causing overflow in
908                  * the worst case where we need to allocate the entire
909                  * relocation tree as a single array.
910                  */
911                 if (exec[i].relocation_count > relocs_max - relocs_total)
912                         return -EINVAL;
913                 relocs_total += exec[i].relocation_count;
914
915                 length = exec[i].relocation_count *
916                         sizeof(struct drm_i915_gem_relocation_entry);
917                 /*
918                  * We must check that the entire relocation array is safe
919                  * to read, but since we may need to update the presumed
920                  * offsets during execution, check for full write access.
921                  */
922                 if (!access_ok(VERIFY_WRITE, ptr, length))
923                         return -EFAULT;
924
925                 if (likely(!i915.prefault_disable)) {
926                         if (fault_in_multipages_readable(ptr, length))
927                                 return -EFAULT;
928                 }
929         }
930
931         return 0;
932 }
933
934 static struct intel_context *
935 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
936                           struct intel_engine_cs *ring, const u32 ctx_id)
937 {
938         struct intel_context *ctx = NULL;
939         struct i915_ctx_hang_stats *hs;
940
941         if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
942                 return ERR_PTR(-EINVAL);
943
944         ctx = i915_gem_context_get(file->driver_priv, ctx_id);
945         if (IS_ERR(ctx))
946                 return ctx;
947
948         hs = &ctx->hang_stats;
949         if (hs->banned) {
950                 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
951                 return ERR_PTR(-EIO);
952         }
953
954         return ctx;
955 }
956
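/* Commit the pending read/write domains computed during relocation and mark
 * every vma as active on the ring, recording the write seqno and flagging
 * dirty objects for the implicit flush at the end of the batch.
 */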
957 static void
958 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
959                                    struct intel_engine_cs *ring)
960 {
961         struct i915_vma *vma;
962
963         list_for_each_entry(vma, vmas, exec_list) {
964                 struct drm_i915_gem_object *obj = vma->obj;
965                 u32 old_read = obj->base.read_domains;
966                 u32 old_write = obj->base.write_domain;
967
968                 obj->base.write_domain = obj->base.pending_write_domain;
969                 if (obj->base.write_domain == 0)
970                         obj->base.pending_read_domains |= obj->base.read_domains;
971                 obj->base.read_domains = obj->base.pending_read_domains;
972                 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
973
974                 i915_vma_move_to_active(vma, ring);
975                 if (obj->base.write_domain) {
976                         obj->dirty = 1;
977                         obj->last_write_seqno = intel_ring_get_seqno(ring);
978
979                         intel_fb_obj_invalidate(obj, ring);
980
981                         /* update for the implicit flush after a batch */
982                         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
983                 }
984
985                 trace_i915_gem_object_change_domain(obj, old_read, old_write);
986         }
987 }
988
989 static void
990 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
991                                     struct drm_file *file,
992                                     struct intel_engine_cs *ring,
993                                     struct drm_i915_gem_object *obj)
994 {
995         /* Unconditionally force add_request to emit a full flush. */
996         ring->gpu_caches_dirty = true;
997
998         /* Add a breadcrumb for the completion of the batch buffer */
999         (void)__i915_add_request(ring, file, obj, NULL);
1000 }
1001
1002 static int
1003 i915_reset_gen7_sol_offsets(struct drm_device *dev,
1004                             struct intel_engine_cs *ring)
1005 {
1006         struct drm_i915_private *dev_priv = dev->dev_private;
1007         int ret, i;
1008
1009         if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1010                 DRM_DEBUG("sol reset is gen7/rcs only\n");
1011                 return -EINVAL;
1012         }
1013
1014         ret = intel_ring_begin(ring, 4 * 3);
1015         if (ret)
1016                 return ret;
1017
1018         for (i = 0; i < 4; i++) {
1019                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1020                 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
1021                 intel_ring_emit(ring, 0);
1022         }
1023
1024         intel_ring_advance(ring);
1025
1026         return 0;
1027 }
1028
1029 /**
1030  * Find one BSD ring to dispatch the corresponding BSD command.
1031  * The Ring ID is returned.
1032  */
1033 static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1034                                   struct drm_file *file)
1035 {
1036         struct drm_i915_private *dev_priv = dev->dev_private;
1037         struct drm_i915_file_private *file_priv = file->driver_priv;
1038
1039         /* Check whether the file_priv is using one ring */
1040         if (file_priv->bsd_ring)
1041                 return file_priv->bsd_ring->id;
1042         else {
1043                 /* If not, use the ping-pong mechanism to select one ring */
1044                 int ring_id;
1045
1046                 mutex_lock(&dev->struct_mutex);
1047                 if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
1048                         ring_id = VCS;
1049                         dev_priv->mm.bsd_ring_dispatch_index = 1;
1050                 } else {
1051                         ring_id = VCS2;
1052                         dev_priv->mm.bsd_ring_dispatch_index = 0;
1053                 }
1054                 file_priv->bsd_ring = &dev_priv->ring[ring_id];
1055                 mutex_unlock(&dev->struct_mutex);
1056                 return ring_id;
1057         }
1058 }
1059
1060 static struct drm_i915_gem_object *
1061 eb_get_batch(struct eb_vmas *eb)
1062 {
1063         struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1064
1065         /*
1066          * SNA is doing fancy tricks with compressing batch buffers, which leads
1067          * to negative relocation deltas. Usually that works out ok since the
1068          * relocate address is still positive, except when the batch is placed
1069          * very low in the GTT. Ensure this doesn't happen.
1070          *
1071          * Note that actual hangs have only been observed on gen7, but for
1072          * paranoia do it everywhere.
1073          */
1074         vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1075
1076         return vma->obj;
1077 }
1078
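/*
 * Common execbuffer path shared by both ioctls: validate the arguments,
 * select the ring, context and address space, look up and reserve all
 * objects, apply relocations (falling back to the slow path on a fault),
 * flush caches, switch context and dispatch the batch, then move the
 * objects to the active list and queue a request behind the batch.
 */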
1079 static int
1080 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1081                        struct drm_file *file,
1082                        struct drm_i915_gem_execbuffer2 *args,
1083                        struct drm_i915_gem_exec_object2 *exec)
1084 {
1085         struct drm_i915_private *dev_priv = dev->dev_private;
1086         struct eb_vmas *eb;
1087         struct drm_i915_gem_object *batch_obj;
1088         struct drm_clip_rect *cliprects = NULL;
1089         struct intel_engine_cs *ring;
1090         struct intel_context *ctx;
1091         struct i915_address_space *vm;
1092         const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1093         u64 exec_start = args->batch_start_offset, exec_len;
1094         u32 mask, flags;
1095         int ret, mode, i;
1096         bool need_relocs;
1097
1098         if (!i915_gem_check_execbuffer(args))
1099                 return -EINVAL;
1100
1101         ret = validate_exec_list(exec, args->buffer_count);
1102         if (ret)
1103                 return ret;
1104
1105         flags = 0;
1106         if (args->flags & I915_EXEC_SECURE) {
1107                 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1108                         return -EPERM;
1109
1110                 flags |= I915_DISPATCH_SECURE;
1111         }
1112         if (args->flags & I915_EXEC_IS_PINNED)
1113                 flags |= I915_DISPATCH_PINNED;
1114
1115         if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1116                 DRM_DEBUG("execbuf with unknown ring: %d\n",
1117                           (int)(args->flags & I915_EXEC_RING_MASK));
1118                 return -EINVAL;
1119         }
1120
1121         if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1122                 ring = &dev_priv->ring[RCS];
1123         else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1124                 if (HAS_BSD2(dev)) {
1125                         int ring_id;
1126                         ring_id = gen8_dispatch_bsd_ring(dev, file);
1127                         ring = &dev_priv->ring[ring_id];
1128                 } else
1129                         ring = &dev_priv->ring[VCS];
1130         } else
1131                 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1132
1133         if (!intel_ring_initialized(ring)) {
1134                 DRM_DEBUG("execbuf with invalid ring: %d\n",
1135                           (int)(args->flags & I915_EXEC_RING_MASK));
1136                 return -EINVAL;
1137         }
1138
1139         mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1140         mask = I915_EXEC_CONSTANTS_MASK;
1141         switch (mode) {
1142         case I915_EXEC_CONSTANTS_REL_GENERAL:
1143         case I915_EXEC_CONSTANTS_ABSOLUTE:
1144         case I915_EXEC_CONSTANTS_REL_SURFACE:
1145                 if (mode != 0 && ring != &dev_priv->ring[RCS]) {
1146                         DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1147                         return -EINVAL;
1148                 }
1149
1150                 if (mode != dev_priv->relative_constants_mode) {
1151                         if (INTEL_INFO(dev)->gen < 4) {
1152                                 DRM_DEBUG("no rel constants on pre-gen4\n");
1153                                 return -EINVAL;
1154                         }
1155
1156                         if (INTEL_INFO(dev)->gen > 5 &&
1157                             mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1158                                 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1159                                 return -EINVAL;
1160                         }
1161
1162                         /* The HW changed the meaning of this bit on gen6 */
1163                         if (INTEL_INFO(dev)->gen >= 6)
1164                                 mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1165                 }
1166                 break;
1167         default:
1168                 DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
1169                 return -EINVAL;
1170         }
1171
1172         if (args->buffer_count < 1) {
1173                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1174                 return -EINVAL;
1175         }
1176
1177         if (args->num_cliprects != 0) {
1178                 if (ring != &dev_priv->ring[RCS]) {
1179                         DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1180                         return -EINVAL;
1181                 }
1182
1183                 if (INTEL_INFO(dev)->gen >= 5) {
1184                         DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
1185                         return -EINVAL;
1186                 }
1187
1188                 if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
1189                         DRM_DEBUG("execbuf with %u cliprects\n",
1190                                   args->num_cliprects);
1191                         return -EINVAL;
1192                 }
1193
1194                 cliprects = kcalloc(args->num_cliprects,
1195                                     sizeof(*cliprects),
1196                                     GFP_KERNEL);
1197                 if (cliprects == NULL) {
1198                         ret = -ENOMEM;
1199                         goto pre_mutex_err;
1200                 }
1201
1202                 if (copy_from_user(cliprects,
1203                                    to_user_ptr(args->cliprects_ptr),
1204                                    sizeof(*cliprects)*args->num_cliprects)) {
1205                         ret = -EFAULT;
1206                         goto pre_mutex_err;
1207                 }
1208         } else {
1209                 if (args->DR4 == 0xffffffff) {
1210                         DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1211                         args->DR4 = 0;
1212                 }
1213
1214                 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
1215                         DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
1216                         return -EINVAL;
1217                 }
1218         }
1219
1220         intel_runtime_pm_get(dev_priv);
1221
1222         ret = i915_mutex_lock_interruptible(dev);
1223         if (ret)
1224                 goto pre_mutex_err;
1225
1226         if (dev_priv->ums.mm_suspended) {
1227                 mutex_unlock(&dev->struct_mutex);
1228                 ret = -EBUSY;
1229                 goto pre_mutex_err;
1230         }
1231
1232         ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1233         if (IS_ERR(ctx)) {
1234                 mutex_unlock(&dev->struct_mutex);
1235                 ret = PTR_ERR(ctx);
1236                 goto pre_mutex_err;
1237         }
1238
1239         i915_gem_context_reference(ctx);
1240
1241         vm = ctx->vm;
1242         if (!USES_FULL_PPGTT(dev))
1243                 vm = &dev_priv->gtt.base;
1244
1245         eb = eb_create(args);
1246         if (eb == NULL) {
1247                 i915_gem_context_unreference(ctx);
1248                 mutex_unlock(&dev->struct_mutex);
1249                 ret = -ENOMEM;
1250                 goto pre_mutex_err;
1251         }
1252
1253         /* Look up object handles */
1254         ret = eb_lookup_vmas(eb, exec, args, vm, file);
1255         if (ret)
1256                 goto err;
1257
1258         /* take note of the batch buffer before we might reorder the lists */
1259         batch_obj = eb_get_batch(eb);
1260
1261         /* Move the objects en-masse into the GTT, evicting if necessary. */
1262         need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1263         ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
1264         if (ret)
1265                 goto err;
1266
1267         /* The objects are in their final locations, apply the relocations. */
1268         if (need_relocs)
1269                 ret = i915_gem_execbuffer_relocate(eb);
1270         if (ret) {
1271                 if (ret == -EFAULT) {
1272                         ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1273                                                                 eb, exec);
1274                         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1275                 }
1276                 if (ret)
1277                         goto err;
1278         }
1279
1280         /* Set the pending read domains for the batch buffer to COMMAND */
1281         if (batch_obj->base.pending_write_domain) {
1282                 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1283                 ret = -EINVAL;
1284                 goto err;
1285         }
1286         batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1287
1288         if (i915_needs_cmd_parser(ring)) {
1289                 ret = i915_parse_cmds(ring,
1290                                       batch_obj,
1291                                       args->batch_start_offset,
1292                                       file->is_master);
1293                 if (ret)
1294                         goto err;
1295
1296                 /*
1297                  * XXX: Actually do this when enabling batch copy...
1298                  *
1299                  * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
1300                  * from MI_BATCH_BUFFER_START commands issued in the
1301                  * dispatch_execbuffer implementations. We specifically don't
1302                  * want that set when the command parser is enabled.
1303                  */
1304         }
1305
1306         /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1307          * batch" bit. Hence we need to pin secure batches into the global gtt.
1308          * hsw should have this fixed, but bdw mucks it up again. */
1309         if (flags & I915_DISPATCH_SECURE &&
1310             !batch_obj->has_global_gtt_mapping) {
1311                 /* When we have multiple VMs, we'll need to make sure that we
1312                  * allocate space first */
1313                 struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
1314                 BUG_ON(!vma);
1315                 vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
1316         }
1317
1318         if (flags & I915_DISPATCH_SECURE)
1319                 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1320         else
1321                 exec_start += i915_gem_obj_offset(batch_obj, vm);
1322
1323         ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
1324         if (ret)
1325                 goto err;
1326
1327         ret = i915_switch_context(ring, ctx);
1328         if (ret)
1329                 goto err;
1330
1331         if (ring == &dev_priv->ring[RCS] &&
1332             mode != dev_priv->relative_constants_mode) {
1333                 ret = intel_ring_begin(ring, 4);
1334                 if (ret)
1335                         goto err;
1336
1337                 intel_ring_emit(ring, MI_NOOP);
1338                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1339                 intel_ring_emit(ring, INSTPM);
1340                 intel_ring_emit(ring, mask << 16 | mode);
1341                 intel_ring_advance(ring);
1342
1343                 dev_priv->relative_constants_mode = mode;
1344         }
1345
1346         if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1347                 ret = i915_reset_gen7_sol_offsets(dev, ring);
1348                 if (ret)
1349                         goto err;
1350         }
1351
1352
1353         exec_len = args->batch_len;
1354         if (cliprects) {
1355                 for (i = 0; i < args->num_cliprects; i++) {
1356                         ret = i915_emit_box(dev, &cliprects[i],
1357                                             args->DR1, args->DR4);
1358                         if (ret)
1359                                 goto err;
1360
1361                         ret = ring->dispatch_execbuffer(ring,
1362                                                         exec_start, exec_len,
1363                                                         flags);
1364                         if (ret)
1365                                 goto err;
1366                 }
1367         } else {
1368                 ret = ring->dispatch_execbuffer(ring,
1369                                                 exec_start, exec_len,
1370                                                 flags);
1371                 if (ret)
1372                         goto err;
1373         }
1374
1375         trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1376
1377         i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
1378         i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
1379
1380 err:
1381         /* the request owns the ref now */
1382         i915_gem_context_unreference(ctx);
1383         eb_destroy(eb);
1384
1385         mutex_unlock(&dev->struct_mutex);
1386
1387 pre_mutex_err:
1388         kfree(cliprects);
1389
1390         /* intel_gpu_busy should also get a ref, so it will free when the device
1391          * is really idle. */
1392         intel_runtime_pm_put(dev_priv);
1393         return ret;
1394 }
1395
1396 /*
1397  * Legacy execbuffer just creates an exec2 list from the original exec object
1398  * list array and passes it to the real function.
1399  */
1400 int
1401 i915_gem_execbuffer(struct drm_device *dev, void *data,
1402                     struct drm_file *file)
1403 {
1404         struct drm_i915_gem_execbuffer *args = data;
1405         struct drm_i915_gem_execbuffer2 exec2;
1406         struct drm_i915_gem_exec_object *exec_list = NULL;
1407         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1408         int ret, i;
1409
1410         if (args->buffer_count < 1) {
1411                 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1412                 return -EINVAL;
1413         }
1414
1415         /* Copy in the exec list from userland */
1416         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1417         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1418         if (exec_list == NULL || exec2_list == NULL) {
1419                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1420                           args->buffer_count);
1421                 drm_free_large(exec_list);
1422                 drm_free_large(exec2_list);
1423                 return -ENOMEM;
1424         }
1425         ret = copy_from_user(exec_list,
1426                              to_user_ptr(args->buffers_ptr),
1427                              sizeof(*exec_list) * args->buffer_count);
1428         if (ret != 0) {
1429                 DRM_DEBUG("copy %d exec entries failed %d\n",
1430                           args->buffer_count, ret);
1431                 drm_free_large(exec_list);
1432                 drm_free_large(exec2_list);
1433                 return -EFAULT;
1434         }
1435
1436         for (i = 0; i < args->buffer_count; i++) {
1437                 exec2_list[i].handle = exec_list[i].handle;
1438                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1439                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1440                 exec2_list[i].alignment = exec_list[i].alignment;
1441                 exec2_list[i].offset = exec_list[i].offset;
1442                 if (INTEL_INFO(dev)->gen < 4)
1443                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1444                 else
1445                         exec2_list[i].flags = 0;
1446         }
1447
1448         exec2.buffers_ptr = args->buffers_ptr;
1449         exec2.buffer_count = args->buffer_count;
1450         exec2.batch_start_offset = args->batch_start_offset;
1451         exec2.batch_len = args->batch_len;
1452         exec2.DR1 = args->DR1;
1453         exec2.DR4 = args->DR4;
1454         exec2.num_cliprects = args->num_cliprects;
1455         exec2.cliprects_ptr = args->cliprects_ptr;
1456         exec2.flags = I915_EXEC_RENDER;
1457         i915_execbuffer2_set_context_id(exec2, 0);
1458
1459         ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1460         if (!ret) {
1461                 struct drm_i915_gem_exec_object __user *user_exec_list =
1462                         to_user_ptr(args->buffers_ptr);
1463
1464                 /* Copy the new buffer offsets back to the user's exec list. */
1465                 for (i = 0; i < args->buffer_count; i++) {
1466                         ret = __copy_to_user(&user_exec_list[i].offset,
1467                                              &exec2_list[i].offset,
1468                                              sizeof(user_exec_list[i].offset));
1469                         if (ret) {
1470                                 ret = -EFAULT;
1471                                 DRM_DEBUG("failed to copy %d exec entries "
1472                                           "back to user (%d)\n",
1473                                           args->buffer_count, ret);
1474                                 break;
1475                         }
1476                 }
1477         }
1478
1479         drm_free_large(exec_list);
1480         drm_free_large(exec2_list);
1481         return ret;
1482 }
1483
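/*
 * execbuffer2 ioctl: copy the exec object list from userspace, run the
 * common execbuffer path and, on success, copy the resulting offsets back so
 * userspace can use them as presumed offsets next time.
 */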
1484 int
1485 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1486                      struct drm_file *file)
1487 {
1488         struct drm_i915_gem_execbuffer2 *args = data;
1489         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1490         int ret;
1491
1492         if (args->buffer_count < 1 ||
1493             args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1494                 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1495                 return -EINVAL;
1496         }
1497
1498         if (args->rsvd2 != 0) {
1499                 DRM_DEBUG("dirty rsvd2 field\n");
1500                 return -EINVAL;
1501         }
1502
1503         exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1504                              GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1505         if (exec2_list == NULL)
1506                 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1507                                            args->buffer_count);
1508         if (exec2_list == NULL) {
1509                 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1510                           args->buffer_count);
1511                 return -ENOMEM;
1512         }
1513         ret = copy_from_user(exec2_list,
1514                              to_user_ptr(args->buffers_ptr),
1515                              sizeof(*exec2_list) * args->buffer_count);
1516         if (ret != 0) {
1517                 DRM_DEBUG("copy %d exec entries failed %d\n",
1518                           args->buffer_count, ret);
1519                 drm_free_large(exec2_list);
1520                 return -EFAULT;
1521         }
1522
1523         ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1524         if (!ret) {
1525                 /* Copy the new buffer offsets back to the user's exec list. */
1526                 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1527                                    to_user_ptr(args->buffers_ptr);
1528                 int i;
1529
1530                 for (i = 0; i < args->buffer_count; i++) {
1531                         ret = __copy_to_user(&user_exec_list[i].offset,
1532                                              &exec2_list[i].offset,
1533                                              sizeof(user_exec_list[i].offset));
1534                         if (ret) {
1535                                 ret = -EFAULT;
1536                                 DRM_DEBUG("failed to copy %d exec entries "
1537                                           "back to user\n",
1538                                           args->buffer_count);
1539                                 break;
1540                         }
1541                 }
1542         }
1543
1544         drm_free_large(exec2_list);
1545         return ret;
1546 }