/* arch/x86/mm/kasan_init_64.c */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

/*
 * This page is used as the early shadow. We don't use empty_zero_page
 * at early stages because stack instrumentation could write some
 * garbage to this page.
 * Later we reuse it as the zero shadow for large ranges of memory
 * that are allowed to be accessed but are not instrumented by kasan
 * (vmalloc/vmemmap ...).
 */
static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;

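/*
 * Allocate and map real shadow memory for one range of directly mapped
 * physical page frames (an entry of pfn_mapped[]).
 */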
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fastpath. In some rare cases
         * we could cross the boundary of the mapped shadow, so we just
         * map some more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

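/* Unmap the PGD entries covering [start, end) so the range can be rebuilt. */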
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}

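/*
 * Point every PGD entry covering the shadow region at kasan_zero_pud,
 * so that during early boot every shadow access resolves to
 * kasan_zero_page.
 */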
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
                                | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}

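/*
 * The zero_{pte,pmd,pud,pgd}_populate() helpers below map a shadow
 * range to the single read-only kasan_zero_page. Naturally aligned
 * PMD/PUD/PGDIR-sized chunks reuse the shared kasan_zero_{pte,pmd,pud}
 * tables; for the remaining tail a fresh page table is allocated and
 * filled one level down.
 */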
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);

        while (addr + PAGE_SIZE <= end) {
                WARN_ON(!pte_none(*pte));
                set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
                                        | __PAGE_KERNEL_RO));
                addr += PAGE_SIZE;
                pte = pte_offset_kernel(pmd, addr);
        }
        return 0;
}

static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pmd_t *pmd = pmd_offset(pud, addr);

        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
                                        | _KERNPG_TABLE));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }
        if (addr < end) {
                if (pmd_none(*pmd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pte_populate(pmd, addr, end);
        }
        return ret;
}

static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
                                unsigned long end)
{
        int ret = 0;
        pud_t *pud = pud_offset(pgd, addr);

        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
                                        | _KERNPG_TABLE));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }

        if (addr < end) {
                if (pud_none(*pud)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pmd_populate(pud, addr, end);
        }
        return ret;
}

static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
        int ret = 0;
        pgd_t *pgd = pgd_offset_k(addr);

        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
                                        | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }

        if (addr < end) {
                if (pgd_none(*pgd)) {
                        void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;
                        set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
                }
                ret = zero_pud_populate(pgd, addr, end);
        }
        return ret;
}

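/*
 * Map [start, end) of the shadow to kasan_zero_page, panicking if the
 * page tables for it cannot be allocated.
 */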
static void __init populate_zero_shadow(const void *start, const void *end)
{
        if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
                panic("kasan: unable to map zero shadow!");
}

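/*
 * With inline instrumentation a wild shadow access (e.g. from a
 * NULL pointer dereference or a user memory access) shows up as a
 * general protection fault, so hook the die notifier to print a hint
 * about the likely cause.
 */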
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif

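/*
 * Build the shared zero page tables (every entry points one level down
 * towards kasan_zero_page) and map the whole shadow region through them
 * in both the early and the final kernel page tables.
 */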
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        kasan_map_early_shadow(early_level4_pgt);
        kasan_map_early_shadow(init_level4_pgt);
}

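/*
 * Replace the early, all-zero shadow with the final layout: real shadow
 * memory for the directly mapped physical ranges and the kernel image,
 * zero shadow for everything that is not instrumented.
 */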
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

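        /*
         * Run on a copy of the kernel page tables (early_level4_pgt) so
         * that the shadow region of init_level4_pgt can be torn down
         * and rebuilt while the early shadow mapping stays usable.
         */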
        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

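        /*
         * Everything below the start of the direct mapping (PAGE_OFFSET)
         * only needs the zero shadow; the directly mapped physical
         * ranges get real shadow memory.
         */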
        populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        for (i = 0; i < E820_X_MAX; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
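        /*
         * The hole between the end of the direct mapping and the kernel
         * text mapping (vmalloc/vmemmap etc.) is not instrumented and
         * only needs the zero shadow.
         */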
        populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                        kasan_mem_to_shadow((void *)__START_KERNEL_map));

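        /* The kernel image [_stext, _end) gets real shadow memory. */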
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

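        /* Above the end of the module area the zero shadow is enough again. */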
        populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

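        /*
         * Early stack instrumentation may have written garbage into
         * kasan_zero_page (see the comment at its definition); wipe it
         * before it starts serving as the zero shadow.
         */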
        memset(kasan_zero_page, 0, PAGE_SIZE);

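        /* Switch back to the real kernel page tables and enable reports. */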
        load_cr3(init_level4_pgt);
        __flush_tlb_all();
        init_task.kasan_depth = 0;

        pr_info("Kernel address sanitizer initialized\n");
}