/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
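/*
 * On bare metal we own the partition table, so the update hook simply
 * writes the new second doubleword (PATB1) of entry 0 directly.
 */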
static int native_update_partition_table(u64 patb1)
{
	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}
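/*
 * Boot-time allocator for page table pages: hand back naturally
 * aligned, zeroed memory from memblock, for use before the slab
 * allocator is available.
 */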
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}
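/*
 * Map a single kernel page of map_page_size (base page, 2M or 1G) at
 * effective address 'ea', backed by physical address 'pa'.  Page table
 * pages come from the slab once it is up, and from memblock via
 * early_alloc_pgtable() during early boot.
 */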
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = (pte_t *)pmdp;
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = (pte_t *)pmdp;
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}
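/*
 * Set up the kernel radix page tables: map the linear range of every
 * memblock region with the largest page size the CPU supports, then
 * allocate the process table and advertise it through the partition
 * table.
 */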
static void __init radix_init_pgtable(void)
{
	int loop_count;
	u64 base, end, start_addr;
	unsigned long rts_field;
	struct memblock_region *reg;
	unsigned long linear_page_size;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	loop_count = 0;
	for_each_memblock(memory, reg) {

		start_addr = reg->base;

redo:
		if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
			linear_page_size = PUD_SIZE;
		else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
			linear_page_size = PMD_SIZE;
		else
			linear_page_size = PAGE_SIZE;

		base = _ALIGN_UP(start_addr, linear_page_size);
		end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);

		pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
			(unsigned long)base, (unsigned long)end,
			linear_page_size);

		while (base < end) {
			radix__map_kernel_page((unsigned long)__va(base),
					       base, PAGE_KERNEL_X,
					       linear_page_size);
			base += linear_page_size;
		}
		/*
		 * map the rest using lower page size
		 */
		if (end < reg->base + reg->size) {
			start_addr = end;
			loop_count++;
			goto redo;
		}
		loop_count = 0;
	}
	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 * we support 52 bits, hence 52-28 = 24, 11000
	 */
	rts_field = 3ull << PPC_BITLSHIFT(2);
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	ppc_md.update_partition_table(__pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}
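/*
 * Bare metal only: allocate the partition table, point entry 0 at the
 * kernel radix root (host radix translation, PATB_HR) and tell the
 * hardware about it via the PTCR.
 */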
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field;
	/*
	 * we support 52 bits, hence 52-28 = 24, 11000
	 */
	rts_field = 3ull << PPC_BITLSHIFT(2);

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
	partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
	partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
					  RADIX_PGD_INDEX_SIZE | PATB_HR);
	pr_info("Partition table %p\n", partition_tb);

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	/*
	 * update partition table control register,
	 * 64 K size.
	 */
	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}
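/* Install the bare-metal hook used to update the partition table. */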
void __init radix_init_native(void)
{
	ppc_md.update_partition_table = native_update_partition_table;
}
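/*
 * Translate a base page shift reported by firmware into an index into
 * mmu_psize_defs[] (negative if the shift is not one we support).
 */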
static int __init get_idx_from_shift(unsigned int shift)
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}
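/*
 * Determine the radix page sizes supported by the CPU, preferring what
 * the device tree reports and falling back to 4K and 64K defaults.
 */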
static void __init radix_init_page_sizes(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}
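/*
 * Boot CPU MMU setup: set LPCR[UPRT], pick the base and vmemmap page
 * sizes, export the radix page table geometry to the generic 64-bit
 * code, and build the partition/process tables and kernel mappings.
 */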
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;
	/*
	 * setup LPCR UPRT based on mmu_features
	 */
	lpcr = mfspr(SPRN_LPCR);
	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	radix_init_page_sizes();
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		radix_init_partition_table();

	radix_init_pgtable();
}
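/*
 * Secondary CPUs only need the per-CPU SPR setup; the tables were
 * already built by the boot CPU.
 */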
void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * setup LPCR UPRT based on mmu_features
	 */
	lpcr = mfspr(SPRN_LPCR);
	mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
	/*
	 * update partition table control register, 64 K size.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access, but keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}
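/*
 * vmemmap support: the virtual memmap is backed through the same
 * radix__map_kernel_page() helper used for the other kernel mappings.
 */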
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	/* FIXME!! x86 does more here. Should we free the page tables mapping vmemmap? */
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */