/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;

#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
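
/* Note: these macros fold the high bits of a 40-bit physical address into the
 * low bits of a PTE/PDE. For example, with addr = 0x8_0000_1000 (bit 35 set),
 * (addr >> 28) & 0xff0 is 0x080, so address bit 35 ends up in entry bit 7; in
 * general, address bits 39:32 land in entry bits 11:4 (only bits 38:32 into
 * bits 10:4 for the Haswell variant, which masks with 0x7f0).
 */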
#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ uses PDE bits 11:4 for physical address bits 39:32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
/* Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
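
/* Worked example: for the eLLC+LLC write-back value 0xb (binary 1011),
 * HSW_CACHEABILITY_CONTROL(0xb) evaluates to
 * ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) = 0x006 | 0x800 = 0x806,
 * i.e. the low three cacheability bits sit in PTE bits 3:1 and the fourth
 * bit sits in PTE bit 11, as the comment above describes.
 */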
#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
#define GEN8_LEGACY_PDPS		4
#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
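
/* Note: the PPAT index of a gen8 PTE/PDE appears to use the same bit
 * positions as the x86 PAT index (_PAGE_PWT = bit 3, _PAGE_PCD = bit 4,
 * _PAGE_PAT = bit 7), which is presumably why the x86 page-table flag macros
 * are reused here. The selected index picks one of the eight entries
 * programmed into GEN8_PRIVATE_PAT by gen8_setup_private_ppat() below.
 */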
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
					     enum i915_cache_level level,
					     bool valid)
{
	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;
	if (level != I915_CACHE_NONE)
		pte |= PPAT_CACHED_INDEX;
	else
		pte |= PPAT_UNCACHED_INDEX;
	return pte;
}

static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
					       dma_addr_t addr,
					       enum i915_cache_level level)
{
	gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable. Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level,
				      bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE0;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE0;
	}

	return pte;
}
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
	unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
	unsigned last_pte, i;

	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
				      I915_CACHE_LLC, use_scratch);

	while (num_entries) {
		struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];

		last_pte = first_pte + num_entries;
		if (last_pte > GEN8_PTES_PER_PAGE)
			last_pte = GEN8_PTES_PER_PAGE;

		pt_vaddr = kmap_atomic(page_table);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
	unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_dma_address(sg_iter.sg) +
			(sg_iter.sg_pgoffset << PAGE_SHIFT);
		pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
						    true);
		if (++act_pte == GEN8_PTES_PER_PAGE) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		if (ppgtt->pd_dma_addr[i]) {
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pd_dma_addr[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

			for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
				dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];

				if (addr)
					pci_unmap_page(ppgtt->base.dev->pdev,
						       addr, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
			}
		}
		kfree(ppgtt->gen8_pt_dma_addr[i]);
	}

	__free_pages(ppgtt->gen8_pt_pages,
		     get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
	__free_pages(ppgtt->pd_pages,
		     get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
/*
 * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
 * net effect resembling a 2-level page table in normal x86 terms. Each PDP
 * represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 * TODO: Do something with the size parameter
 */
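/*
 * Sketch of the resulting 32-bit split implied by the figures above: bits
 * 31:30 of a GPU address select one of the 4 PDPs, bits 29:21 select one of
 * 512 PDEs, bits 20:12 select one of 512 PTEs, and bits 11:0 are the offset
 * within the 4KiB page, which is where 4 * 512 * 512 * 4096 = 4GB comes from.
 */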
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
	struct page *pt_pages;
	int i, j, ret = -ENOMEM;
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;

		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);

	/* FIXME: split allocation into smaller pieces. For now we only ever do
	 * this once, but with full PPGTT, the multiple contiguous allocations
	 */
	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
	if (!ppgtt->pd_pages)

	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));

	ppgtt->gen8_pt_pages = pt_pages;
	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;

	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);

	/*
	 * - Create a mapping for the page directories.
	 * - For each page directory:
	 *	allocate space for page table mappings.
	 *	map each page table
	 */
	for (i = 0; i < max_pdp; i++) {
		temp = pci_map_page(ppgtt->base.dev->pdev,
				    &ppgtt->pd_pages[i], 0,
				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))

		ppgtt->pd_dma_addr[i] = temp;

		ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
		if (!ppgtt->gen8_pt_dma_addr[i])

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
			temp = pci_map_page(ppgtt->base.dev->pdev,
					    PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))

			ppgtt->gen8_pt_dma_addr[i][j] = temp;

	/* For now, the PPGTT helper functions all require that the PDEs are
	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
	 * will never need to touch the PDEs again. */
	for (i = 0; i < max_pdp; i++) {
		gen8_ppgtt_pde_t *pd_vaddr;
		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
		kunmap_atomic(pd_vaddr);

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,

	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
			 (ppgtt->num_pt_pages - num_pt_pages) +

	return -ENOSYS; /* Not ready yet */

	ppgtt->base.cleanup(&ppgtt->base);
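
/*
 * Note: on gen6/gen7 the PPGTT page directory lives inside the global GTT
 * (at pd_offset), so the PDEs below are written through the CPU's
 * write-combined mapping of the GTT (dev_priv->gtt.gsm) rather than through
 * a separately allocated page.
 */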
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}
static int gen6_ppgtt_enable(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	BUG_ON(ppgtt->pd_offset & 0x3f);

	gen6_write_pdes(ppgtt);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
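	/*
	 * Note: RING_PP_DIR_BASE takes the page-directory offset in 64-byte
	 * cacheline units, hence the division by 64 above; with 4-byte gen6
	 * PDEs that is 16 PDEs per cacheline.
	 */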
	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i;

	drm_mm_takedown(&ppgtt->base.mm);

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
	if (!ppgtt->pt_pages)

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
	if (!ppgtt->pt_dma_addr)

	for (i = 0; i < ppgtt->num_pd_entries; i++) {

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {

		ppgtt->pt_dma_addr[i] = pt_addr;

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);

	kfree(ppgtt->pt_pages);
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);

	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else if (IS_GEN8(dev))
		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);

	dev_priv->mm.aliasing_ppgtt = ppgtt;
	drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	ppgtt->base.cleanup(&ppgtt->base);
	dev_priv->mm.aliasing_ppgtt = NULL;
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
	ppgtt->base.clear_range(&ppgtt->base,
				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				obj->base.size >> PAGE_SHIFT,
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)

static bool do_idling(struct drm_i915_private *dev_priv)
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
void i915_check_and_clear_faults(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	if (INTEL_INFO(dev)->gen < 6)

	for_each_ring(ring, dev_priv, i) {
		fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddress space: %s\n"
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(ring),
				   fault_reg & ~RING_FAULT_VALID);

	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)

	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		i915_gem_clflush_object(obj, obj->pin_display);
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	i915_gem_chipset_flush(dev);
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
	if (obj->has_dma_mapping)

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
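	/*
	 * Note: the 64-bit gen8 PTE is written as two 32-bit MMIO writes
	 * (low dword first, then high dword), so the update is not atomic
	 * from the device's point of view; the posting read performed by the
	 * callers is what confirms the write has landed.
	 */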
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     unsigned int first_entry,
				     enum i915_cache_level level)
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen8_gtt_pte_t __iomem *gtt_entries =
		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_dma_address(sg_iter.sg) +
			(sg_iter.sg_pgoffset << PAGE_SHIFT);
		gen8_set_pte(&gtt_entries[i],
			     gen8_pte_encode(addr, level, true));

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different NUMA access patterns. Therefore, even with the way we
	 * assume hardware should work, we must keep this posting read for
	 * paranoia.
	 */
	WARN_ON(readq(&gtt_entries[i-1])
		!= gen8_pte_encode(addr, level, true));

#if 0 /* TODO: Still needed on GEN8? */
	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
/* Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     unsigned int first_entry,
				     enum i915_cache_level level)
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different NUMA access patterns. Therefore, even with the way we
	 * assume hardware should work, we must keep this posting read for
	 * paranoia.
	 */
	WARN_ON(readl(&gtt_entries[i-1]) !=
		vm->pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch.addr,

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
	intel_gtt_clear_range(first_entry, num_entries);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,

	obj->has_global_gtt_mapping = 1;

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       obj->base.size >> PAGE_SHIFT,

	obj->has_global_gtt_mapping = 0;

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
	if (node->color != color)

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,

		if (node->allocated && node->color != color)
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);

	dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
		list_add(&vma->vma_link, &obj->vma_list);

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
intel_enable_ppgtt(struct drm_device *dev)
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)

void i915_gem_init_global_gtt(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->gtt.base.mm);
		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;

	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
static int setup_scratch_page(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);

	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
	dma_addr = page_to_phys(page);

	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

static void teardown_scratch_page(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;

static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	bdw_gmch_ctl = 1 << bdw_gmch_ctl;
	return bdw_gmch_ctl << 20;
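
/* Examples: on gen6/7 the GGMS field is the GTT size in MB directly
 * (e.g. 2 -> 2MB of PTEs, i.e. 2MB / 4 bytes = 512K entries -> 2GB of GTT
 * space), while on gen8 it is log2-encoded (e.g. 3 -> 1 << 3 = 8MB of PTEs,
 * i.e. 8MB / 8 bytes = 1M entries -> 4GB), which is why
 * gen8_get_total_gtt_size() shifts a 1 by the field value first.
 */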
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */

static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
static int ggtt_probe_common(struct drm_device *dev,
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);
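	/*
	 * Note: on gen6+ the GTTMMADR BAR (PCI BAR 0) is split in half, with
	 * the register space in the first half and the GTT entries ("GSM")
	 * in the second half, which is why the GTT base above is the BAR
	 * start plus half the BAR length.
	 */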
	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");

	ret = setup_scratch_page(dev);
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
#define GEN8_PPAT_UC		(0<<0)
#define GEN8_PPAT_WC		(1<<0)
#define GEN8_PPAT_WT		(2<<0)
#define GEN8_PPAT_WB		(3<<0)
#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
/* FIXME(BDW): Bspec is completely confused about cache control bits. */
#define GEN8_PPAT_LLC		(1<<2)
#define GEN8_PPAT_LLCELLC	(2<<2)
#define GEN8_PPAT_LLCeLLC	(3<<2)
#define GEN8_PPAT_AGE(x)	(x<<4)
#define GEN8_PPAT(i, x)		((uint64_t) (x) << ((i) * 8))

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
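	/*
	 * Note: the PAT is a packed array of eight one-byte entries, so
	 * GEN8_PPAT(i, x) places entry i at bits (8*i)+7:(8*i); a PTE's PPAT
	 * index (see the PPAT_*_INDEX defines above) selects which of these
	 * eight entries provides its cacheability.
	 */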
	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
static int gen8_gmch_probe(struct drm_device *dev,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen8_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;

	gen8_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
static int gen6_gmch_probe(struct drm_device *dev,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
static void gen6_gmch_remove(struct i915_address_space *vm)
	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	teardown_scratch_page(vm->dev);

static int i915_gmch_probe(struct drm_device *dev,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
		DRM_ERROR("failed to set up gmch\n");

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;

static void i915_gmch_remove(struct i915_address_space *vm)
	intel_gmch_remove();
int i915_gem_gtt_init(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
			gtt->base.pte_encode = snb_pte_encode;
		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
		dev_priv->gtt.base.cleanup = gen6_gmch_remove;

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);