arch/powerpc/mm/pgtable-hash64.c
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit vmemmap_create_mapping(unsigned long start,
                                     unsigned long page_size,
                                     unsigned long phys)
{
        int rc = htab_bolt_mapping(start, start + page_size, phys,
                                   pgprot_val(PAGE_KERNEL),
                                   mmu_vmemmap_psize, mmu_kernel_ssize);
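        /*
         * htab_bolt_mapping() can fail partway through the range; tear
         * down whatever it did manage to insert.  -ENOENT from the
         * removal just means nothing had been mapped yet.
         */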
        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, start + page_size,
                                              mmu_vmemmap_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_remove_mapping(unsigned long start,
                            unsigned long page_size)
{
        int rc = htab_remove_mapping(start, start + page_size,
                                     mmu_vmemmap_psize,
                                     mmu_kernel_ssize);
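        /*
         * A hard failure to remove the mapping is fatal; a missing
         * entry (-ENOENT) is unexpected but survivable, so only warn.
         */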
        BUG_ON((rc < 0) && (rc != -ENOENT));
        WARN_ON(rc == -ENOENT);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page() is currently only called by __ioremap().  It adds
 * an entry to the kernel page table for the mapping; if the MM
 * subsystem is not yet up, it instead bolts an entry directly into
 * the hash page table (HPT).
 */
int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

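        /*
         * Once the slab allocator is up we can allocate the
         * intermediate page table levels and install a normal Linux
         * PTE; before that, fall back to bolting the translation
         * straight into the hash page table.
         */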
        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
        } else {
                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * Linux page table entry for this mapping.  Simply bolt an
                 * entry in the hardware page table.
                 */
                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        pr_err("Failed to create bolted mapping for I/O memory at 0x%016lx\n",
                               pa);
                        return -ENOMEM;
                }
        }

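        /*
         * Make sure the page table update (or bolted hash entry) is
         * visible to other CPUs before the caller starts using the
         * new mapping.
         */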
        smp_wmb();
        return 0;
}