2f8496d48c94d9d8a632795c7cca78b5517fd16d
[cascardo/linux.git] / drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/fence-array.h>
29 #include <drm/drmP.h>
30 #include <drm/amdgpu_drm.h>
31 #include "amdgpu.h"
32 #include "amdgpu_trace.h"
33
34 /*
35  * GPUVM
36  * GPUVM is similar to the legacy gart on older asics, however
37  * rather than there being a single global gart table
38  * for the entire GPU, there are multiple VM page tables active
39  * at any given time.  The VM page tables can contain a mix of
40  * vram pages and system memory pages, and system memory pages
41  * can be mapped as snooped (cached system pages) or unsnooped
42  * (uncached system pages).
43  * Each VM has an ID associated with it and there is a page table
44  * associated with each VMID.  When executing a command buffer,
45  * the kernel tells the ring what VMID to use for that command
46  * buffer.  VMIDs are allocated dynamically as commands are submitted.
47  * The userspace drivers maintain their own address space and the kernel
48  * sets up their page tables accordingly when they submit their
49  * command buffers and a VMID is assigned.
50  * Cayman/Trinity support up to 8 active VMs at any given time;
51  * SI supports 16.
52  */
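/*
 * Illustrative sketch (not from the driver itself): how a GPU virtual
 * address is split into a page directory index and a page table entry
 * index.  This mirrors the shifts and masks used further down in
 * amdgpu_vm_update_ptes(); amdgpu_vm_block_size is the module parameter
 * that selects how many address bits each page table covers.
 *
 *    addr   = gpu_va / AMDGPU_GPU_PAGE_SIZE;      (address in GPU pages)
 *    pt_idx = addr >> amdgpu_vm_block_size;       (page directory entry)
 *    pte    = addr & (AMDGPU_VM_PTE_COUNT - 1);   (entry inside that PT)
 */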
53
54 /* Special value indicating that no flush is necessary */
55 #define AMDGPU_VM_NO_FLUSH (~0ll)
56
57 /* Local structure. Encapsulate some VM table update parameters to reduce
58  * the number of function parameters
59  */
60 struct amdgpu_vm_update_params {
61         /* address where to copy page table entries from */
62         uint64_t src;
63         /* DMA addresses to use for mapping */
64         dma_addr_t *pages_addr;
65         /* indirect buffer to fill with commands */
66         struct amdgpu_ib *ib;
67 };
68
69 /**
70  * amdgpu_vm_num_pdes - return the number of page directory entries
71  *
72  * @adev: amdgpu_device pointer
73  *
74  * Calculate the number of page directory entries.
75  */
76 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
77 {
78         return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
79 }
80
81 /**
82  * amdgpu_vm_directory_size - returns the size of the page directory in bytes
83  *
84  * @adev: amdgpu_device pointer
85  *
86  * Calculate the size of the page directory in bytes.
87  */
88 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
89 {
90         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
91 }
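/*
 * Worked example (numbers purely illustrative): with a 64 GB VM space
 * (max_pfn = 64 GB / 4 KB = 1 << 24) and amdgpu_vm_block_size = 9,
 * amdgpu_vm_num_pdes() returns 1 << 15 = 32768 page directory entries,
 * and amdgpu_vm_directory_size() returns 32768 * 8 = 256 KB for the
 * page directory BO.
 */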
92
93 /**
94  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
95  *
96  * @vm: vm providing the BOs
97  * @validated: head of validation list
98  * @entry: entry to add
99  *
100  * Add the page directory to the list of BOs to
101  * validate for command submission.
102  */
103 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
104                          struct list_head *validated,
105                          struct amdgpu_bo_list_entry *entry)
106 {
107         entry->robj = vm->page_directory;
108         entry->priority = 0;
109         entry->tv.bo = &vm->page_directory->tbo;
110         entry->tv.shared = true;
111         entry->user_pages = NULL;
112         list_add(&entry->tv.head, validated);
113 }
114
115 /**
116  * amdgpu_vm_get_pt_bos - add the vm BOs to a duplicates list
117  *
118  * @adev: amdgpu device pointer
119  * @vm: vm providing the BOs
120  * @duplicates: head of duplicates list
121  *
122  * Add the page tables to the BO duplicates list
123  * for command submission.
124  */
125 void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
126                           struct list_head *duplicates)
127 {
128         uint64_t num_evictions;
129         unsigned i;
130
131         /* We only need to validate the page tables
132          * if they aren't already valid.
133          */
134         num_evictions = atomic64_read(&adev->num_evictions);
135         if (num_evictions == vm->last_eviction_counter)
136                 return;
137
138         /* add the vm page table to the list */
139         for (i = 0; i <= vm->max_pde_used; ++i) {
140                 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
141
142                 if (!entry->robj)
143                         continue;
144
145                 list_add(&entry->tv.head, duplicates);
146         }
147
148 }
149
150 /**
151  * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
152  *
153  * @adev: amdgpu device instance
154  * @vm: vm providing the BOs
155  *
156  * Move the PT BOs to the tail of the LRU.
157  */
158 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
159                                   struct amdgpu_vm *vm)
160 {
161         struct ttm_bo_global *glob = adev->mman.bdev.glob;
162         unsigned i;
163
164         spin_lock(&glob->lru_lock);
165         for (i = 0; i <= vm->max_pde_used; ++i) {
166                 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
167
168                 if (!entry->robj)
169                         continue;
170
171                 ttm_bo_move_to_lru_tail(&entry->robj->tbo);
172         }
173         spin_unlock(&glob->lru_lock);
174 }
175
176 static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
177                               struct amdgpu_vm_id *id)
178 {
179         return id->current_gpu_reset_count !=
180                 atomic_read(&adev->gpu_reset_counter) ? true : false;
181 }
182
183 /**
184  * amdgpu_vm_grab_id - allocate the next free VMID
185  *
186  * @vm: vm to allocate id for
187  * @ring: ring we want to submit job to
188  * @sync: sync object where we add dependencies
189  * @fence: fence protecting ID from reuse
 * @job: job that will use the VMID
190  *
191  * Allocate an id for the vm, adding fences to the sync obj as necessary.
192  */
193 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
194                       struct amdgpu_sync *sync, struct fence *fence,
195                       struct amdgpu_job *job)
196 {
197         struct amdgpu_device *adev = ring->adev;
198         struct fence *updates = sync->last_vm_update;
199         struct amdgpu_vm_id *id, *idle;
200         struct fence **fences;
201         unsigned i;
202         int r = 0;
203
204         fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
205                                GFP_KERNEL);
206         if (!fences)
207                 return -ENOMEM;
208
209         mutex_lock(&adev->vm_manager.lock);
210
211         /* Check if we have an idle VMID */
212         i = 0;
213         list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
214                 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
215                 if (!fences[i])
216                         break;
217                 ++i;
218         }
219
220         /* If we can't find an idle VMID to use, wait till one becomes available */
221         if (&idle->list == &adev->vm_manager.ids_lru) {
222                 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
223                 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
224                 struct fence_array *array;
225                 unsigned j;
226
227                 for (j = 0; j < i; ++j)
228                         fence_get(fences[j]);
229
230                 array = fence_array_create(i, fences, fence_context,
231                                            seqno, true);
232                 if (!array) {
233                         for (j = 0; j < i; ++j)
234                                 fence_put(fences[j]);
235                         kfree(fences);
236                         r = -ENOMEM;
237                         goto error;
238                 }
239
240
241                 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
242                 fence_put(&array->base);
243                 if (r)
244                         goto error;
245
246                 mutex_unlock(&adev->vm_manager.lock);
247                 return 0;
248
249         }
250         kfree(fences);
251
252         job->vm_needs_flush = true;
253         /* Check if we can use a VMID already assigned to this VM */
254         i = ring->idx;
255         do {
256                 struct fence *flushed;
257                 bool same_ring = ring->idx == i;
258
259                 id = vm->ids[i++];
260                 if (i == AMDGPU_MAX_RINGS)
261                         i = 0;
262
263                 /* Check all the prerequisites to using this VMID */
264                 if (!id)
265                         continue;
266                 if (amdgpu_vm_is_gpu_reset(adev, id))
267                         continue;
268
269                 if (atomic64_read(&id->owner) != vm->client_id)
270                         continue;
271
272                 if (job->vm_pd_addr != id->pd_gpu_addr)
273                         continue;
274
275                 if (!same_ring &&
276                     (!id->last_flush || !fence_is_signaled(id->last_flush)))
277                         continue;
278
279                 flushed  = id->flushed_updates;
280                 if (updates &&
281                     (!flushed || fence_is_later(updates, flushed)))
282                         continue;
283
284                 /* Good, we can use this VMID. Remember this submission as
285                  * user of the VMID.
286                  */
287                 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
288                 if (r)
289                         goto error;
290
291                 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
292                 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
293                 vm->ids[ring->idx] = id;
294
295                 job->vm_id = id - adev->vm_manager.ids;
296                 job->vm_needs_flush = false;
297                 trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
298
299                 mutex_unlock(&adev->vm_manager.lock);
300                 return 0;
301
302         } while (i != ring->idx);
303
304         /* Still no ID to use? Then use the idle one found earlier */
305         id = idle;
306
307         /* Remember this submission as user of the VMID */
308         r = amdgpu_sync_fence(ring->adev, &id->active, fence);
309         if (r)
310                 goto error;
311
312         fence_put(id->first);
313         id->first = fence_get(fence);
314
315         fence_put(id->last_flush);
316         id->last_flush = NULL;
317
318         fence_put(id->flushed_updates);
319         id->flushed_updates = fence_get(updates);
320
321         id->pd_gpu_addr = job->vm_pd_addr;
322         id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
323         list_move_tail(&id->list, &adev->vm_manager.ids_lru);
324         atomic64_set(&id->owner, vm->client_id);
325         vm->ids[ring->idx] = id;
326
327         job->vm_id = id - adev->vm_manager.ids;
328         trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
329
330 error:
331         mutex_unlock(&adev->vm_manager.lock);
332         return r;
333 }
334
335 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
336 {
337         struct amdgpu_device *adev = ring->adev;
338         const struct amdgpu_ip_block_version *ip_block;
339
340         if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
341                 /* only compute rings */
342                 return false;
343
344         ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
345         if (!ip_block)
346                 return false;
347
348         if (ip_block->major <= 7) {
349                 /* gfx7 has no workaround */
350                 return true;
351         } else if (ip_block->major == 8) {
352                 if (adev->gfx.mec_fw_version >= 673)
353                         /* gfx8 is fixed in MEC firmware 673 */
354                         return false;
355                 else
356                         return true;
357         }
358         return false;
359 }
360
361 /**
362  * amdgpu_vm_flush - hardware flush the vm
363  *
364  * @ring: ring to use for flush
365  * @job: job providing the vm_id, the page directory address and the
366  *       GDS/GWS/OA state needed for the flush
367  *
368  * Emit a VM flush when it is necessary.
369  */
370 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
371 {
372         struct amdgpu_device *adev = ring->adev;
373         struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
374         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
375                 id->gds_base != job->gds_base ||
376                 id->gds_size != job->gds_size ||
377                 id->gws_base != job->gws_base ||
378                 id->gws_size != job->gws_size ||
379                 id->oa_base != job->oa_base ||
380                 id->oa_size != job->oa_size);
381         int r;
382
383         if (ring->funcs->emit_pipeline_sync && (
384             job->vm_needs_flush || gds_switch_needed ||
385             amdgpu_vm_ring_has_compute_vm_bug(ring)))
386                 amdgpu_ring_emit_pipeline_sync(ring);
387
388         if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
389             amdgpu_vm_is_gpu_reset(adev, id))) {
390                 struct fence *fence;
391
392                 trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
393                 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
394
395                 r = amdgpu_fence_emit(ring, &fence);
396                 if (r)
397                         return r;
398
399                 mutex_lock(&adev->vm_manager.lock);
400                 fence_put(id->last_flush);
401                 id->last_flush = fence;
402                 mutex_unlock(&adev->vm_manager.lock);
403         }
404
405         if (gds_switch_needed) {
406                 id->gds_base = job->gds_base;
407                 id->gds_size = job->gds_size;
408                 id->gws_base = job->gws_base;
409                 id->gws_size = job->gws_size;
410                 id->oa_base = job->oa_base;
411                 id->oa_size = job->oa_size;
412                 amdgpu_ring_emit_gds_switch(ring, job->vm_id,
413                                             job->gds_base, job->gds_size,
414                                             job->gws_base, job->gws_size,
415                                             job->oa_base, job->oa_size);
416         }
417
418         return 0;
419 }
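/*
 * Rough sketch (assumed call flow, simplified) of how the two functions
 * above cooperate during command submission: the scheduler first calls
 * amdgpu_vm_grab_id() while resolving job dependencies, and the ring
 * backend later calls amdgpu_vm_flush() when the job is actually emitted.
 *
 *    r = amdgpu_vm_grab_id(vm, ring, &job->sync, fence, job);
 *    ...
 *    r = amdgpu_vm_flush(ring, job);    (emits pipeline sync / VM flush
 *                                        only if job->vm_needs_flush)
 */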
420
421 /**
422  * amdgpu_vm_reset_id - reset VMID to zero
423  *
424  * @adev: amdgpu device structure
425  * @vm_id: vmid number to use
426  *
427  * Reset saved GDS, GWS and OA to force switch on next flush.
428  */
429 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
430 {
431         struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
432
433         id->gds_base = 0;
434         id->gds_size = 0;
435         id->gws_base = 0;
436         id->gws_size = 0;
437         id->oa_base = 0;
438         id->oa_size = 0;
439 }
440
441 /**
442  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
443  *
444  * @vm: requested vm
445  * @bo: requested buffer object
446  *
447  * Find @bo inside the requested vm.
448  * Search inside the @bo's vm list for the requested vm.
449  * Returns the found bo_va or NULL if none is found
450  *
451  * Object has to be reserved!
452  */
453 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
454                                        struct amdgpu_bo *bo)
455 {
456         struct amdgpu_bo_va *bo_va;
457
458         list_for_each_entry(bo_va, &bo->va, bo_list) {
459                 if (bo_va->vm == vm) {
460                         return bo_va;
461                 }
462         }
463         return NULL;
464 }
465
466 /**
467  * amdgpu_vm_update_pages - helper to call the right asic function
468  *
469  * @adev: amdgpu_device pointer
470  * @vm_update_params: see amdgpu_vm_update_params definition
471  * @pe: addr of the page entry
472  * @addr: dst addr to write into pe
473  * @count: number of page entries to update
474  * @incr: increase next addr by incr bytes
475  * @flags: hw access flags
476  *
477  * Traces the parameters and calls the right asic functions
478  * to setup the page table using the DMA.
479  */
480 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
481                                    struct amdgpu_vm_update_params
482                                         *vm_update_params,
483                                    uint64_t pe, uint64_t addr,
484                                    unsigned count, uint32_t incr,
485                                    uint32_t flags)
486 {
487         trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
488
489         if (vm_update_params->src) {
490                 amdgpu_vm_copy_pte(adev, vm_update_params->ib,
491                         pe, (vm_update_params->src + (addr >> 12) * 8), count);
492
493         } else if (vm_update_params->pages_addr) {
494                 amdgpu_vm_write_pte(adev, vm_update_params->ib,
495                         vm_update_params->pages_addr,
496                         pe, addr, count, incr, flags);
497
498         } else if (count < 3) {
499                 amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
500                                     count, incr, flags);
501
502         } else {
503                 amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
504                                       count, incr, flags);
505         }
506 }
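/*
 * Illustrative example (not a real caller): how the fields of
 * struct amdgpu_vm_update_params select the update path above.
 * A GTT-backed BO supplies pages_addr, so every PTE goes through
 * amdgpu_vm_write_pte() with a per-page GART lookup; a VRAM mapping
 * leaves both src and pages_addr NULL and large runs are written with
 * amdgpu_vm_set_pte_pde() instead.
 *
 *    struct amdgpu_vm_update_params params = {};
 *
 *    params.ib = &job->ibs[0];
 *    params.pages_addr = ttm->dma_address;    (GTT case only)
 *    amdgpu_vm_update_pages(adev, &params, pe, addr, count,
 *                           AMDGPU_GPU_PAGE_SIZE, flags);
 */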
507
508 /**
509  * amdgpu_vm_clear_bo - initially clear the page dir/table
510  *
511  * @adev: amdgpu_device pointer
512  * @bo: bo to clear
513  *
514  * The bo has to be reserved before calling this function.
515  */
516 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
517                               struct amdgpu_vm *vm,
518                               struct amdgpu_bo *bo)
519 {
520         struct amdgpu_ring *ring;
521         struct fence *fence = NULL;
522         struct amdgpu_job *job;
523         struct amdgpu_vm_update_params vm_update_params;
524         unsigned entries;
525         uint64_t addr;
526         int r;
527
528         memset(&vm_update_params, 0, sizeof(vm_update_params));
529         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
530
531         r = reservation_object_reserve_shared(bo->tbo.resv);
532         if (r)
533                 return r;
534
535         r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
536         if (r)
537                 goto error;
538
539         addr = amdgpu_bo_gpu_offset(bo);
540         entries = amdgpu_bo_size(bo) / 8;
541
542         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
543         if (r)
544                 goto error;
545
546         vm_update_params.ib = &job->ibs[0];
547         amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
548                                0, 0);
549         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
550
551         WARN_ON(job->ibs[0].length_dw > 64);
552         r = amdgpu_job_submit(job, ring, &vm->entity,
553                               AMDGPU_FENCE_OWNER_VM, &fence);
554         if (r)
555                 goto error_free;
556
557         amdgpu_bo_fence(bo, fence, true);
558         fence_put(fence);
559         return 0;
560
561 error_free:
562         amdgpu_job_free(job);
563
564 error:
565         return r;
566 }
567
568 /**
569  * amdgpu_vm_map_gart - Resolve gart mapping of addr
570  *
571  * @pages_addr: optional DMA address to use for lookup
572  * @addr: the unmapped addr
573  *
574  * Look up the physical address of the page that the pte resolves
575  * to and return the pointer for the page table entry.
576  */
577 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
578 {
579         uint64_t result;
580
581         if (pages_addr) {
582                 /* page table offset */
583                 result = pages_addr[addr >> PAGE_SHIFT];
584
585                 /* in case cpu page size != gpu page size */
586                 result |= addr & (~PAGE_MASK);
587
588         } else {
589                 /* No mapping required */
590                 result = addr;
591         }
592
593         result &= 0xFFFFFFFFFFFFF000ULL;
594
595         return result;
596 }
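/*
 * Worked example (assuming a 64 KB CPU PAGE_SIZE, purely illustrative):
 * for addr = 0x25000, pages_addr[0x25000 >> 16] = pages_addr[2] yields
 * the DMA address of the backing 64 KB CPU page, addr & ~PAGE_MASK adds
 * the 0x5000 offset inside it, and the final mask keeps the result
 * aligned to the 4 KB GPU page that the PTE actually describes.
 */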
597
598 /**
599  * amdgpu_vm_update_page_directory - make sure that page directory is valid
600  *
601  * @adev: amdgpu_device pointer
602  * @vm: requested vm
605  *
606  * Allocates new page tables if necessary
607  * and updates the page directory.
608  * Returns 0 for success, error for failure.
609  */
610 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
611                                     struct amdgpu_vm *vm)
612 {
613         struct amdgpu_ring *ring;
614         struct amdgpu_bo *pd = vm->page_directory;
615         uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
616         uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
617         uint64_t last_pde = ~0, last_pt = ~0;
618         unsigned count = 0, pt_idx, ndw;
619         struct amdgpu_job *job;
620         struct amdgpu_vm_update_params vm_update_params;
621         struct fence *fence = NULL;
622
623         int r;
624
625         memset(&vm_update_params, 0, sizeof(vm_update_params));
626         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
627
628         /* padding, etc. */
629         ndw = 64;
630
631         /* assume the worst case */
632         ndw += vm->max_pde_used * 6;
633
634         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
635         if (r)
636                 return r;
637
638         vm_update_params.ib = &job->ibs[0];
639
640         /* walk over the address space and update the page directory */
641         for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
642                 struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
643                 uint64_t pde, pt;
644
645                 if (bo == NULL)
646                         continue;
647
648                 pt = amdgpu_bo_gpu_offset(bo);
649                 if (vm->page_tables[pt_idx].addr == pt)
650                         continue;
651                 vm->page_tables[pt_idx].addr = pt;
652
653                 pde = pd_addr + pt_idx * 8;
654                 if (((last_pde + 8 * count) != pde) ||
655                     ((last_pt + incr * count) != pt)) {
656
657                         if (count) {
658                                 amdgpu_vm_update_pages(adev, &vm_update_params,
659                                                        last_pde, last_pt,
660                                                        count, incr,
661                                                        AMDGPU_PTE_VALID);
662                         }
663
664                         count = 1;
665                         last_pde = pde;
666                         last_pt = pt;
667                 } else {
668                         ++count;
669                 }
670         }
671
672         if (count)
673                 amdgpu_vm_update_pages(adev, &vm_update_params,
674                                         last_pde, last_pt,
675                                         count, incr, AMDGPU_PTE_VALID);
676
677         if (vm_update_params.ib->length_dw != 0) {
678                 amdgpu_ring_pad_ib(ring, vm_update_params.ib);
679                 amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
680                                  AMDGPU_FENCE_OWNER_VM);
681                 WARN_ON(vm_update_params.ib->length_dw > ndw);
682                 r = amdgpu_job_submit(job, ring, &vm->entity,
683                                       AMDGPU_FENCE_OWNER_VM, &fence);
684                 if (r)
685                         goto error_free;
686
687                 amdgpu_bo_fence(pd, fence, true);
688                 fence_put(vm->page_directory_fence);
689                 vm->page_directory_fence = fence_get(fence);
690                 fence_put(fence);
691
692         } else {
693                 amdgpu_job_free(job);
694         }
695
696         return 0;
697
698 error_free:
699         amdgpu_job_free(job);
700         return r;
701 }
702
703 /**
704  * amdgpu_vm_frag_ptes - add fragment information to PTEs
705  *
706  * @adev: amdgpu_device pointer
707  * @vm_update_params: see amdgpu_vm_update_params definition
708  * @pe_start: first PTE to handle
709  * @pe_end: last PTE to handle
710  * @addr: addr those PTEs should point to
711  * @flags: hw mapping flags
712  */
713 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
714                                 struct amdgpu_vm_update_params
715                                         *vm_update_params,
716                                 uint64_t pe_start, uint64_t pe_end,
717                                 uint64_t addr, uint32_t flags)
718 {
719         /**
720          * The MC L1 TLB supports variable sized pages, based on a fragment
721          * field in the PTE. When this field is set to a non-zero value, page
722          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
723          * flags are considered valid for all PTEs within the fragment range
724          * and corresponding mappings are assumed to be physically contiguous.
725          *
726          * The L1 TLB can store a single PTE for the whole fragment,
727          * significantly increasing the space available for translation
728          * caching. This leads to large improvements in throughput when the
729          * TLB is under pressure.
730          *
731          * The L2 TLB distributes small and large fragments into two
732          * asymmetric partitions. The large fragment cache is significantly
733          * larger. Thus, we try to use large fragments wherever possible.
734          * Userspace can support this by aligning virtual base address and
735          * allocation size to the fragment size.
736          */
737
738         /* SI and newer are optimized for 64KB */
739         uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
740         uint64_t frag_align = 0x80;
741
742         uint64_t frag_start = ALIGN(pe_start, frag_align);
743         uint64_t frag_end = pe_end & ~(frag_align - 1);
744
745         unsigned count;
746
747         /* Abort early if there isn't anything to do */
748         if (pe_start == pe_end)
749                 return;
750
751         /* system pages are not contiguous */
752         if (vm_update_params->src || vm_update_params->pages_addr ||
753                 !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
754
755                 count = (pe_end - pe_start) / 8;
756                 amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
757                                        addr, count, AMDGPU_GPU_PAGE_SIZE,
758                                        flags);
759                 return;
760         }
761
762         /* handle the 4K area at the beginning */
763         if (pe_start != frag_start) {
764                 count = (frag_start - pe_start) / 8;
765                 amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
766                                        count, AMDGPU_GPU_PAGE_SIZE, flags);
767                 addr += AMDGPU_GPU_PAGE_SIZE * count;
768         }
769
770         /* handle the area in the middle */
771         count = (frag_end - frag_start) / 8;
772         amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
773                                AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
774
775         /* handle the 4K area at the end */
776         if (frag_end != pe_end) {
777                 addr += AMDGPU_GPU_PAGE_SIZE * count;
778                 count = (pe_end - frag_end) / 8;
779                 amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
780                                        count, AMDGPU_GPU_PAGE_SIZE, flags);
781         }
782 }
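/*
 * Worked example (addresses purely illustrative): frag_align is 0x80
 * bytes, i.e. 16 PTEs or 64 KB of address space.  For pe_start = 0x1008
 * and pe_end = 0x1208 (64 PTEs total):
 *
 *    frag_start = ALIGN(0x1008, 0x80) = 0x1080
 *    frag_end   = 0x1208 & ~0x7f      = 0x1200
 *
 * so 15 leading PTEs and 1 trailing PTE are written at 4 KB granularity,
 * while the 48 PTEs in the middle get AMDGPU_PTE_FRAG_64KB set.
 */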
783
784 /**
785  * amdgpu_vm_update_ptes - make sure that page tables are valid
786  *
787  * @adev: amdgpu_device pointer
788  * @vm_update_params: see amdgpu_vm_update_params definition
789  * @vm: requested vm
790  * @start: start of GPU address range
791  * @end: end of GPU address range
792  * @dst: destination address to map to, the next dst inside the function
793  * @flags: mapping flags
794  *
795  * Update the page tables in the range @start - @end.
796  */
797 static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
798                                   struct amdgpu_vm_update_params
799                                         *vm_update_params,
800                                   struct amdgpu_vm *vm,
801                                   uint64_t start, uint64_t end,
802                                   uint64_t dst, uint32_t flags)
803 {
804         const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
805
806         uint64_t cur_pe_start, cur_pe_end, cur_dst;
807         uint64_t addr; /* next GPU address to be updated */
808         uint64_t pt_idx;
809         struct amdgpu_bo *pt;
810         unsigned nptes; /* next number of ptes to be updated */
811         uint64_t next_pe_start;
812
813         /* initialize the variables */
814         addr = start;
815         pt_idx = addr >> amdgpu_vm_block_size;
816         pt = vm->page_tables[pt_idx].entry.robj;
817
818         if ((addr & ~mask) == (end & ~mask))
819                 nptes = end - addr;
820         else
821                 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
822
823         cur_pe_start = amdgpu_bo_gpu_offset(pt);
824         cur_pe_start += (addr & mask) * 8;
825         cur_pe_end = cur_pe_start + 8 * nptes;
826         cur_dst = dst;
827
828         /* for next ptb */
829         addr += nptes;
830         dst += nptes * AMDGPU_GPU_PAGE_SIZE;
831
832         /* walk over the address space and update the page tables */
833         while (addr < end) {
834                 pt_idx = addr >> amdgpu_vm_block_size;
835                 pt = vm->page_tables[pt_idx].entry.robj;
836
837                 if ((addr & ~mask) == (end & ~mask))
838                         nptes = end - addr;
839                 else
840                         nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
841
842                 next_pe_start = amdgpu_bo_gpu_offset(pt);
843                 next_pe_start += (addr & mask) * 8;
844
845                 if (cur_pe_end == next_pe_start) {
846                         /* The next ptb is consecutive to the current ptb.
847                          * Don't call amdgpu_vm_frag_ptes now; the two ptbs
848                          * will be updated together later.
849                          */
850                         cur_pe_end += 8 * nptes;
851                 } else {
852                         amdgpu_vm_frag_ptes(adev, vm_update_params,
853                                             cur_pe_start, cur_pe_end,
854                                             cur_dst, flags);
855
856                         cur_pe_start = next_pe_start;
857                         cur_pe_end = next_pe_start + 8 * nptes;
858                         cur_dst = dst;
859                 }
860
861                 /* for next ptb */
862                 addr += nptes;
863                 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
864         }
865
866         amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
867                             cur_pe_end, cur_dst, flags);
868 }
869
870 /**
871  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
872  *
873  * @adev: amdgpu_device pointer
874  * @exclusive: fence we need to sync to
875  * @src: address where to copy page table entries from
876  * @pages_addr: DMA addresses to use for mapping
877  * @vm: requested vm
878  * @start: start of mapped range
879  * @last: last mapped entry
880  * @flags: flags for the entries
881  * @addr: addr to set the area to
882  * @fence: optional resulting fence
883  *
884  * Fill in the page table entries between @start and @last.
885  * Returns 0 for success, -EINVAL for failure.
886  */
887 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
888                                        struct fence *exclusive,
889                                        uint64_t src,
890                                        dma_addr_t *pages_addr,
891                                        struct amdgpu_vm *vm,
892                                        uint64_t start, uint64_t last,
893                                        uint32_t flags, uint64_t addr,
894                                        struct fence **fence)
895 {
896         struct amdgpu_ring *ring;
897         void *owner = AMDGPU_FENCE_OWNER_VM;
898         unsigned nptes, ncmds, ndw;
899         struct amdgpu_job *job;
900         struct amdgpu_vm_update_params vm_update_params;
901         struct fence *f = NULL;
902         int r;
903
904         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
905         memset(&vm_update_params, 0, sizeof(vm_update_params));
906         vm_update_params.src = src;
907         vm_update_params.pages_addr = pages_addr;
908
909         /* sync to everything on unmapping */
910         if (!(flags & AMDGPU_PTE_VALID))
911                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
912
913         nptes = last - start + 1;
914
915         /*
916          * reserve space for one command every (1 << BLOCK_SIZE)
917          *  entries or 2k dwords (whatever is smaller)
918          */
919         ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
920
921         /* padding, etc. */
922         ndw = 64;
923
924         if (vm_update_params.src) {
925                 /* only copy commands needed */
926                 ndw += ncmds * 7;
927
928         } else if (vm_update_params.pages_addr) {
929                 /* header for write data commands */
930                 ndw += ncmds * 4;
931
932                 /* body of write data command */
933                 ndw += nptes * 2;
934
935         } else {
936                 /* set page commands needed */
937                 ndw += ncmds * 10;
938
939                 /* two extra commands for begin/end of fragment */
940                 ndw += 2 * 10;
941         }
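        /*
         * Worked example (assuming amdgpu_vm_block_size = 9, purely
         * illustrative): for nptes = 1000 PTEs, ncmds = (1000 >> 9) + 1 = 2,
         * so the set-page path above reserves ndw = 64 + 2 * 10 + 2 * 10 =
         * 104 dwords, i.e. 416 bytes of IB space.
         */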
942
943         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
944         if (r)
945                 return r;
946
947         vm_update_params.ib = &job->ibs[0];
948
949         r = amdgpu_sync_fence(adev, &job->sync, exclusive);
950         if (r)
951                 goto error_free;
952
953         r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
954                              owner);
955         if (r)
956                 goto error_free;
957
958         r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
959         if (r)
960                 goto error_free;
961
962         amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
963                               last + 1, addr, flags);
964
965         amdgpu_ring_pad_ib(ring, vm_update_params.ib);
966         WARN_ON(vm_update_params.ib->length_dw > ndw);
967         r = amdgpu_job_submit(job, ring, &vm->entity,
968                               AMDGPU_FENCE_OWNER_VM, &f);
969         if (r)
970                 goto error_free;
971
972         amdgpu_bo_fence(vm->page_directory, f, true);
973         if (fence) {
974                 fence_put(*fence);
975                 *fence = fence_get(f);
976         }
977         fence_put(f);
978         return 0;
979
980 error_free:
981         amdgpu_job_free(job);
982         return r;
983 }
984
985 /**
986  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
987  *
988  * @adev: amdgpu_device pointer
989  * @exclusive: fence we need to sync to
990  * @gtt_flags: flags as they are used for GTT
991  * @pages_addr: DMA addresses to use for mapping
992  * @vm: requested vm
993  * @mapping: mapped range and flags to use for the update
994  * @addr: addr to set the area to
995  * @flags: HW flags for the mapping
996  * @fence: optional resulting fence
997  *
998  * Split the mapping into smaller chunks so that each update fits
999  * into a SDMA IB.
1000  * Returns 0 for success, -EINVAL for failure.
1001  */
1002 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1003                                       struct fence *exclusive,
1004                                       uint32_t gtt_flags,
1005                                       dma_addr_t *pages_addr,
1006                                       struct amdgpu_vm *vm,
1007                                       struct amdgpu_bo_va_mapping *mapping,
1008                                       uint32_t flags, uint64_t addr,
1009                                       struct fence **fence)
1010 {
1011         const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
1012
1013         uint64_t src = 0, start = mapping->it.start;
1014         int r;
1015
1016         /* Normally bo_va->flags only contains the READABLE and WRITEABLE
1017          * bits here, but just in case we filter the flags first.
1018          */
1019         if (!(mapping->flags & AMDGPU_PTE_READABLE))
1020                 flags &= ~AMDGPU_PTE_READABLE;
1021         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1022                 flags &= ~AMDGPU_PTE_WRITEABLE;
1023
1024         trace_amdgpu_vm_bo_update(mapping);
1025
1026         if (pages_addr) {
1027                 if (flags == gtt_flags)
1028                         src = adev->gart.table_addr + (addr >> 12) * 8;
1029                 addr = 0;
1030         }
1031         addr += mapping->offset;
1032
1033         if (!pages_addr || src)
1034                 return amdgpu_vm_bo_update_mapping(adev, exclusive,
1035                                                    src, pages_addr, vm,
1036                                                    start, mapping->it.last,
1037                                                    flags, addr, fence);
1038
1039         while (start != mapping->it.last + 1) {
1040                 uint64_t last;
1041
1042                 last = min((uint64_t)mapping->it.last, start + max_size - 1);
1043                 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1044                                                 src, pages_addr, vm,
1045                                                 start, last, flags, addr,
1046                                                 fence);
1047                 if (r)
1048                         return r;
1049
1050                 start = last + 1;
1051                 addr += max_size * AMDGPU_GPU_PAGE_SIZE;
1052         }
1053
1054         return 0;
1055 }
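/*
 * Worked example (sizes purely illustrative): max_size above is
 * 64 MB / 4 KB = 16384 GPU pages, so a mapping covering 40000 pages is
 * flushed out as three calls to amdgpu_vm_bo_update_mapping() of 16384,
 * 16384 and 7232 pages, each small enough to fit into a single SDMA IB.
 */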
1056
1057 /**
1058  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1059  *
1060  * @adev: amdgpu_device pointer
1061  * @bo_va: requested BO and VM object
1062  * @mem: ttm mem
1063  *
1064  * Fill in the page table entries for @bo_va.
1065  * Returns 0 for success, -EINVAL for failure.
1066  *
1067  * Object has to be reserved and mutex must be locked!
1068  */
1069 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1070                         struct amdgpu_bo_va *bo_va,
1071                         struct ttm_mem_reg *mem)
1072 {
1073         struct amdgpu_vm *vm = bo_va->vm;
1074         struct amdgpu_bo_va_mapping *mapping;
1075         dma_addr_t *pages_addr = NULL;
1076         uint32_t gtt_flags, flags;
1077         struct fence *exclusive;
1078         uint64_t addr;
1079         int r;
1080
1081         if (mem) {
1082                 struct ttm_dma_tt *ttm;
1083
1084                 addr = (u64)mem->start << PAGE_SHIFT;
1085                 switch (mem->mem_type) {
1086                 case TTM_PL_TT:
1087                         ttm = container_of(bo_va->bo->tbo.ttm, struct
1088                                            ttm_dma_tt, ttm);
1089                         pages_addr = ttm->dma_address;
1090                         break;
1091
1092                 case TTM_PL_VRAM:
1093                         addr += adev->vm_manager.vram_base_offset;
1094                         break;
1095
1096                 default:
1097                         break;
1098                 }
1099
1100                 exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1101         } else {
1102                 addr = 0;
1103                 exclusive = NULL;
1104         }
1105
1106         flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1107         gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
1108
1109         spin_lock(&vm->status_lock);
1110         if (!list_empty(&bo_va->vm_status))
1111                 list_splice_init(&bo_va->valids, &bo_va->invalids);
1112         spin_unlock(&vm->status_lock);
1113
1114         list_for_each_entry(mapping, &bo_va->invalids, list) {
1115                 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1116                                                gtt_flags, pages_addr, vm,
1117                                                mapping, flags, addr,
1118                                                &bo_va->last_pt_update);
1119                 if (r)
1120                         return r;
1121         }
1122
1123         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1124                 list_for_each_entry(mapping, &bo_va->valids, list)
1125                         trace_amdgpu_vm_bo_mapping(mapping);
1126
1127                 list_for_each_entry(mapping, &bo_va->invalids, list)
1128                         trace_amdgpu_vm_bo_mapping(mapping);
1129         }
1130
1131         spin_lock(&vm->status_lock);
1132         list_splice_init(&bo_va->invalids, &bo_va->valids);
1133         list_del_init(&bo_va->vm_status);
1134         if (!mem)
1135                 list_add(&bo_va->vm_status, &vm->cleared);
1136         spin_unlock(&vm->status_lock);
1137
1138         return 0;
1139 }
1140
1141 /**
1142  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1143  *
1144  * @adev: amdgpu_device pointer
1145  * @vm: requested vm
1146  *
1147  * Make sure all freed BOs are cleared in the PT.
1148  * Returns 0 for success.
1149  *
1150  * PTs have to be reserved and mutex must be locked!
1151  */
1152 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1153                           struct amdgpu_vm *vm)
1154 {
1155         struct amdgpu_bo_va_mapping *mapping;
1156         int r;
1157
1158         while (!list_empty(&vm->freed)) {
1159                 mapping = list_first_entry(&vm->freed,
1160                         struct amdgpu_bo_va_mapping, list);
1161                 list_del(&mapping->list);
1162
1163                 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1164                                                0, 0, NULL);
1165                 kfree(mapping);
1166                 if (r)
1167                         return r;
1168
1169         }
1170         return 0;
1171
1172 }
1173
1174 /**
1175  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1176  *
1177  * @adev: amdgpu_device pointer
1178  * @vm: requested vm
1179  *
1180  * Make sure all invalidated BOs are cleared in the PT.
1181  * Returns 0 for success.
1182  *
1183  * PTs have to be reserved and mutex must be locked!
1184  */
1185 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1186                              struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1187 {
1188         struct amdgpu_bo_va *bo_va = NULL;
1189         int r = 0;
1190
1191         spin_lock(&vm->status_lock);
1192         while (!list_empty(&vm->invalidated)) {
1193                 bo_va = list_first_entry(&vm->invalidated,
1194                         struct amdgpu_bo_va, vm_status);
1195                 spin_unlock(&vm->status_lock);
1196
1197                 r = amdgpu_vm_bo_update(adev, bo_va, NULL);
1198                 if (r)
1199                         return r;
1200
1201                 spin_lock(&vm->status_lock);
1202         }
1203         spin_unlock(&vm->status_lock);
1204
1205         if (bo_va)
1206                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1207
1208         return r;
1209 }
1210
1211 /**
1212  * amdgpu_vm_bo_add - add a bo to a specific vm
1213  *
1214  * @adev: amdgpu_device pointer
1215  * @vm: requested vm
1216  * @bo: amdgpu buffer object
1217  *
1218  * Add @bo into the requested vm.
1219  * Add @bo to the list of bos associated with the vm
1220  * Returns newly added bo_va or NULL for failure
1221  *
1222  * Object has to be reserved!
1223  */
1224 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1225                                       struct amdgpu_vm *vm,
1226                                       struct amdgpu_bo *bo)
1227 {
1228         struct amdgpu_bo_va *bo_va;
1229
1230         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1231         if (bo_va == NULL) {
1232                 return NULL;
1233         }
1234         bo_va->vm = vm;
1235         bo_va->bo = bo;
1236         bo_va->ref_count = 1;
1237         INIT_LIST_HEAD(&bo_va->bo_list);
1238         INIT_LIST_HEAD(&bo_va->valids);
1239         INIT_LIST_HEAD(&bo_va->invalids);
1240         INIT_LIST_HEAD(&bo_va->vm_status);
1241
1242         list_add_tail(&bo_va->bo_list, &bo->va);
1243
1244         return bo_va;
1245 }
1246
1247 /**
1248  * amdgpu_vm_bo_map - map bo inside a vm
1249  *
1250  * @adev: amdgpu_device pointer
1251  * @bo_va: bo_va to store the address
1252  * @saddr: where to map the BO
1253  * @offset: requested offset in the BO
1254  * @flags: attributes of pages (read/write/valid/etc.)
1255  *
1256  * Add a mapping of the BO at the specified addr into the VM.
1257  * Returns 0 for success, error for failure.
1258  *
1259  * Object has to be reserved and unreserved outside!
1260  */
1261 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1262                      struct amdgpu_bo_va *bo_va,
1263                      uint64_t saddr, uint64_t offset,
1264                      uint64_t size, uint32_t flags)
1265 {
1266         struct amdgpu_bo_va_mapping *mapping;
1267         struct amdgpu_vm *vm = bo_va->vm;
1268         struct interval_tree_node *it;
1269         unsigned last_pfn, pt_idx;
1270         uint64_t eaddr;
1271         int r;
1272
1273         /* validate the parameters */
1274         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1275             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1276                 return -EINVAL;
1277
1278         /* make sure object fit at this offset */
1279         eaddr = saddr + size - 1;
1280         if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1281                 return -EINVAL;
1282
1283         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1284         if (last_pfn >= adev->vm_manager.max_pfn) {
1285                 dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1286                         last_pfn, adev->vm_manager.max_pfn);
1287                 return -EINVAL;
1288         }
1289
1290         saddr /= AMDGPU_GPU_PAGE_SIZE;
1291         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1292
1293         it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1294         if (it) {
1295                 struct amdgpu_bo_va_mapping *tmp;
1296                 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1297                 /* bo and tmp overlap, invalid addr */
1298                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1299                         "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1300                         tmp->it.start, tmp->it.last + 1);
1301                 r = -EINVAL;
1302                 goto error;
1303         }
1304
1305         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1306         if (!mapping) {
1307                 r = -ENOMEM;
1308                 goto error;
1309         }
1310
1311         INIT_LIST_HEAD(&mapping->list);
1312         mapping->it.start = saddr;
1313         mapping->it.last = eaddr;
1314         mapping->offset = offset;
1315         mapping->flags = flags;
1316
1317         list_add(&mapping->list, &bo_va->invalids);
1318         interval_tree_insert(&mapping->it, &vm->va);
1319
1320         /* Make sure the page tables are allocated */
1321         saddr >>= amdgpu_vm_block_size;
1322         eaddr >>= amdgpu_vm_block_size;
1323
1324         BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1325
1326         if (eaddr > vm->max_pde_used)
1327                 vm->max_pde_used = eaddr;
1328
1329         /* walk over the address space and allocate the page tables */
1330         for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1331                 struct reservation_object *resv = vm->page_directory->tbo.resv;
1332                 struct amdgpu_bo_list_entry *entry;
1333                 struct amdgpu_bo *pt;
1334
1335                 entry = &vm->page_tables[pt_idx].entry;
1336                 if (entry->robj)
1337                         continue;
1338
1339                 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1340                                      AMDGPU_GPU_PAGE_SIZE, true,
1341                                      AMDGPU_GEM_DOMAIN_VRAM,
1342                                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1343                                      NULL, resv, &pt);
1344                 if (r)
1345                         goto error_free;
1346
1347                 /* Keep a reference to the page table to avoid freeing
1348                  * them up in the wrong order.
1349                  */
1350                 pt->parent = amdgpu_bo_ref(vm->page_directory);
1351
1352                 r = amdgpu_vm_clear_bo(adev, vm, pt);
1353                 if (r) {
1354                         amdgpu_bo_unref(&pt);
1355                         goto error_free;
1356                 }
1357
1358                 entry->robj = pt;
1359                 entry->priority = 0;
1360                 entry->tv.bo = &entry->robj->tbo;
1361                 entry->tv.shared = true;
1362                 entry->user_pages = NULL;
1363                 vm->page_tables[pt_idx].addr = 0;
1364         }
1365
1366         return 0;
1367
1368 error_free:
1369         list_del(&mapping->list);
1370         interval_tree_remove(&mapping->it, &vm->va);
1371         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1372         kfree(mapping);
1373
1374 error:
1375         return r;
1376 }
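/*
 * Minimal usage sketch (assumed caller-side flow, error handling and
 * locking omitted): a GEM VA ioctl style user would chain the helpers
 * in this file roughly as follows.
 *
 *    bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *    r = amdgpu_vm_bo_map(adev, bo_va, va_addr, 0, amdgpu_bo_size(bo),
 *                         AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *    ...
 *    r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
 *    ...
 *    r = amdgpu_vm_bo_unmap(adev, bo_va, va_addr);
 *    amdgpu_vm_bo_rmv(adev, bo_va);
 */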
1377
1378 /**
1379  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1380  *
1381  * @adev: amdgpu_device pointer
1382  * @bo_va: bo_va to remove the address from
1383  * @saddr: where the BO is mapped
1384  *
1385  * Remove a mapping of the BO at the specified addr from the VM.
1386  * Returns 0 for success, error for failure.
1387  *
1388  * Object has to be reserved and unreserved outside!
1389  */
1390 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1391                        struct amdgpu_bo_va *bo_va,
1392                        uint64_t saddr)
1393 {
1394         struct amdgpu_bo_va_mapping *mapping;
1395         struct amdgpu_vm *vm = bo_va->vm;
1396         bool valid = true;
1397
1398         saddr /= AMDGPU_GPU_PAGE_SIZE;
1399
1400         list_for_each_entry(mapping, &bo_va->valids, list) {
1401                 if (mapping->it.start == saddr)
1402                         break;
1403         }
1404
1405         if (&mapping->list == &bo_va->valids) {
1406                 valid = false;
1407
1408                 list_for_each_entry(mapping, &bo_va->invalids, list) {
1409                         if (mapping->it.start == saddr)
1410                                 break;
1411                 }
1412
1413                 if (&mapping->list == &bo_va->invalids)
1414                         return -ENOENT;
1415         }
1416
1417         list_del(&mapping->list);
1418         interval_tree_remove(&mapping->it, &vm->va);
1419         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1420
1421         if (valid)
1422                 list_add(&mapping->list, &vm->freed);
1423         else
1424                 kfree(mapping);
1425
1426         return 0;
1427 }
1428
1429 /**
1430  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1431  *
1432  * @adev: amdgpu_device pointer
1433  * @bo_va: requested bo_va
1434  *
1435  * Remove @bo_va->bo from the requested vm.
1436  *
1437  * Object has to be reserved!
1438  */
1439 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1440                       struct amdgpu_bo_va *bo_va)
1441 {
1442         struct amdgpu_bo_va_mapping *mapping, *next;
1443         struct amdgpu_vm *vm = bo_va->vm;
1444
1445         list_del(&bo_va->bo_list);
1446
1447         spin_lock(&vm->status_lock);
1448         list_del(&bo_va->vm_status);
1449         spin_unlock(&vm->status_lock);
1450
1451         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1452                 list_del(&mapping->list);
1453                 interval_tree_remove(&mapping->it, &vm->va);
1454                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1455                 list_add(&mapping->list, &vm->freed);
1456         }
1457         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1458                 list_del(&mapping->list);
1459                 interval_tree_remove(&mapping->it, &vm->va);
1460                 kfree(mapping);
1461         }
1462
1463         fence_put(bo_va->last_pt_update);
1464         kfree(bo_va);
1465 }
1466
1467 /**
1468  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1469  *
1470  * @adev: amdgpu_device pointer
1472  * @bo: amdgpu buffer object
1473  *
1474  * Mark @bo as invalid.
1475  */
1476 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1477                              struct amdgpu_bo *bo)
1478 {
1479         struct amdgpu_bo_va *bo_va;
1480
1481         list_for_each_entry(bo_va, &bo->va, bo_list) {
1482                 spin_lock(&bo_va->vm->status_lock);
1483                 if (list_empty(&bo_va->vm_status))
1484                         list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1485                 spin_unlock(&bo_va->vm->status_lock);
1486         }
1487 }
1488
1489 /**
1490  * amdgpu_vm_init - initialize a vm instance
1491  *
1492  * @adev: amdgpu_device pointer
1493  * @vm: requested vm
1494  *
1495  * Init @vm fields.
1496  */
1497 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1498 {
1499         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1500                 AMDGPU_VM_PTE_COUNT * 8);
1501         unsigned pd_size, pd_entries;
1502         unsigned ring_instance;
1503         struct amdgpu_ring *ring;
1504         struct amd_sched_rq *rq;
1505         int i, r;
1506
1507         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1508                 vm->ids[i] = NULL;
1509         vm->va = RB_ROOT;
1510         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1511         spin_lock_init(&vm->status_lock);
1512         INIT_LIST_HEAD(&vm->invalidated);
1513         INIT_LIST_HEAD(&vm->cleared);
1514         INIT_LIST_HEAD(&vm->freed);
1515
1516         pd_size = amdgpu_vm_directory_size(adev);
1517         pd_entries = amdgpu_vm_num_pdes(adev);
1518
1519         /* allocate page table array */
1520         vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1521         if (vm->page_tables == NULL) {
1522                 DRM_ERROR("Cannot allocate memory for page table array\n");
1523                 return -ENOMEM;
1524         }
1525
1526         /* create scheduler entity for page table updates */
1527
1528         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1529         ring_instance %= adev->vm_manager.vm_pte_num_rings;
1530         ring = adev->vm_manager.vm_pte_rings[ring_instance];
1531         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1532         r = amd_sched_entity_init(&ring->sched, &vm->entity,
1533                                   rq, amdgpu_sched_jobs);
1534         if (r)
1535                 return r;
1536
1537         vm->page_directory_fence = NULL;
1538
1539         r = amdgpu_bo_create(adev, pd_size, align, true,
1540                              AMDGPU_GEM_DOMAIN_VRAM,
1541                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1542                              NULL, NULL, &vm->page_directory);
1543         if (r)
1544                 goto error_free_sched_entity;
1545
1546         r = amdgpu_bo_reserve(vm->page_directory, false);
1547         if (r)
1548                 goto error_free_page_directory;
1549
1550         r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
1551         amdgpu_bo_unreserve(vm->page_directory);
1552         if (r)
1553                 goto error_free_page_directory;
1554         vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1555
1556         return 0;
1557
1558 error_free_page_directory:
1559         amdgpu_bo_unref(&vm->page_directory);
1560         vm->page_directory = NULL;
1561
1562 error_free_sched_entity:
1563         amd_sched_entity_fini(&ring->sched, &vm->entity);
1564
1565         return r;
1566 }
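/*
 * Lifecycle sketch (assumed usage, simplified): a per-process VM is
 * typically embedded in the driver file private data, initialized when
 * the file is opened and torn down again on release.
 *
 *    r = amdgpu_vm_init(adev, &fpriv->vm);      (on open)
 *    ...
 *    amdgpu_vm_fini(adev, &fpriv->vm);          (on release)
 */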
1567
1568 /**
1569  * amdgpu_vm_fini - tear down a vm instance
1570  *
1571  * @adev: amdgpu_device pointer
1572  * @vm: requested vm
1573  *
1574  * Tear down @vm.
1575  * Unbind the VM and remove all bos from the vm bo list
1576  */
1577 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1578 {
1579         struct amdgpu_bo_va_mapping *mapping, *tmp;
1580         int i;
1581
1582         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1583
1584         if (!RB_EMPTY_ROOT(&vm->va)) {
1585                 dev_err(adev->dev, "still active bo inside vm\n");
1586         }
1587         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1588                 list_del(&mapping->list);
1589                 interval_tree_remove(&mapping->it, &vm->va);
1590                 kfree(mapping);
1591         }
1592         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1593                 list_del(&mapping->list);
1594                 kfree(mapping);
1595         }
1596
1597         for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
1598                 amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1599         drm_free_large(vm->page_tables);
1600
1601         amdgpu_bo_unref(&vm->page_directory);
1602         fence_put(vm->page_directory_fence);
1603 }
1604
1605 /**
1606  * amdgpu_vm_manager_init - init the VM manager
1607  *
1608  * @adev: amdgpu_device pointer
1609  *
1610  * Initialize the VM manager structures
1611  */
1612 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1613 {
1614         unsigned i;
1615
1616         INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1617
1618         /* skip over VMID 0, since it is the system VM */
1619         for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1620                 amdgpu_vm_reset_id(adev, i);
1621                 amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1622                 list_add_tail(&adev->vm_manager.ids[i].list,
1623                               &adev->vm_manager.ids_lru);
1624         }
1625
1626         adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1627         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1628                 adev->vm_manager.seqno[i] = 0;
1629
1630         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1631         atomic64_set(&adev->vm_manager.client_counter, 0);
1632 }
1633
1634 /**
1635  * amdgpu_vm_manager_fini - cleanup VM manager
1636  *
1637  * @adev: amdgpu_device pointer
1638  *
1639  * Cleanup the VM manager and free resources.
1640  */
1641 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1642 {
1643         unsigned i;
1644
1645         for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1646                 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1647
1648                 fence_put(adev->vm_manager.ids[i].first);
1649                 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1650                 fence_put(id->flushed_updates);
1651         }
1652 }