/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

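/* Serializes updates to the 1:1 mapping and to the memory segment list. */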
static DEFINE_MUTEX(vmem_mutex);

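/*
 * One registered physical memory range; kept on the mem_segs list so that
 * overlapping mappings can be rejected.
 */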
struct memory_segment {
        struct list_head list;
        unsigned long start;
        unsigned long size;
};

static LIST_HEAD(mem_segs);

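/*
 * Page table pages for the identity mapping are taken from the slab
 * allocator once it is available; earlier in boot they come from bootmem.
 */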
static void __ref *vmem_alloc_pages(unsigned int order)
{
        if (slab_is_available())
                return (void *)__get_free_pages(GFP_KERNEL, order);
        return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

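/* A region third (pud) table is 16KB, i.e. four pages, hence order 2. */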
static inline pud_t *vmem_pud_alloc(void)
{
        pud_t *pud = NULL;

        pud = vmem_alloc_pages(2);
        if (!pud)
                return NULL;
        clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
        return pud;
}

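/* A segment (pmd) table is likewise four pages: 2048 eight-byte entries. */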
static inline pmd_t *vmem_pmd_alloc(void)
{
        pmd_t *pmd = NULL;

        pmd = vmem_alloc_pages(2);
        if (!pmd)
                return NULL;
        clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
        return pmd;
}

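/* A page table holds PTRS_PER_PTE entries (2KB) and is allocated 2KB aligned. */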
static pte_t __ref *vmem_pte_alloc(void)
{
        pte_t *pte;

        if (slab_is_available())
                pte = (pte_t *) page_table_alloc(&init_mm);
        else
                pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
                                          PTRS_PER_PTE * sizeof(pte_t));
        if (!pte)
                return NULL;
        clear_table((unsigned long *) pte, _PAGE_INVALID,
                    PTRS_PER_PTE * sizeof(pte_t));
        return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
                        if (!pu_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, pu_dir);
                }
                pu_dir = pud_offset(pg_dir, address);
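                /* With EDAT2, map a properly aligned 2GB chunk with a single
                 * large region-third entry instead of lower-level tables. */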
                if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pud_val(*pu_dir) = __pa(address) |
                                _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
                                (ro ? _REGION_ENTRY_PROTECT : 0);
                        address += PUD_SIZE;
                        continue;
                }
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }
                pm_dir = pmd_offset(pu_dir, address);
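                /* With EDAT1, map a properly aligned 1MB chunk with a single
                 * large segment entry. */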
                if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pmd_val(*pm_dir) = __pa(address) |
                                _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
                                _SEGMENT_ENTRY_YOUNG |
                                (ro ? _SEGMENT_ENTRY_PROTECT : 0);
                        address += PMD_SIZE;
                        continue;
                }
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_val(*pt_dir) = __pa(address) |
                        pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
                address += PAGE_SIZE;
        }
        ret = 0;
out:
        return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        pte_t pte;

        pte_val(pte) = _PAGE_INVALID;
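        /* Walk the page tables and invalidate whatever maps the range;
         * large mappings are cleared at their own level. */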
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        address += PGDIR_SIZE;
                        continue;
                }
                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir)) {
                        address += PUD_SIZE;
                        continue;
                }
                if (pud_large(*pu_dir)) {
                        pud_clear(pu_dir);
                        address += PUD_SIZE;
                        continue;
                }
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        address += PMD_SIZE;
                        continue;
                }
                if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
                        address += PMD_SIZE;
                        continue;
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
                address += PAGE_SIZE;
        }
        flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        for (address = start; address < end;) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
                        if (!pu_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, pu_dir);
                }

                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_pmd_alloc();
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        /* Use 1MB frames for vmemmap if available. We always
                         * use large frames even if they are only partially
                         * used. Otherwise we would have also page tables
                         * since vmemmap_populate gets called for each section
                         * separately. */
                        if (MACHINE_HAS_EDAT1) {
                                void *new_page;

                                new_page = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!new_page)
                                        goto out;
                                pmd_val(*pm_dir) = __pa(new_page) |
                                        _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *new_page;

                        new_page = vmemmap_alloc_block(PAGE_SIZE, node);
                        if (!new_page)
                                goto out;
                        pte_val(*pt_dir) =
                                __pa(new_page) | pgprot_val(PAGE_KERNEL);
                }
                address += PAGE_SIZE;
        }
        ret = 0;
out:
        return ret;
}

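/* Freeing vmemmap backing store is not implemented here; this is a no-op. */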
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
        struct memory_segment *tmp;

        if (seg->start + seg->size > VMEM_MAX_PHYS ||
            seg->start + seg->size < seg->start)
                return -ERANGE;

        list_for_each_entry(tmp, &mem_segs, list) {
                if (seg->start >= tmp->start + tmp->size)
                        continue;
                if (seg->start + seg->size <= tmp->start)
                        continue;
                return -ENOSPC;
        }
        list_add(&seg->list, &mem_segs);
        return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
        list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
        remove_memory_segment(seg);
        vmem_remove_range(seg->start, seg->size);
}

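/*
 * Find the segment registered for [start, start + size) and remove both the
 * segment and its 1:1 mapping.
 */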
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);

        ret = -ENOENT;
        list_for_each_entry(seg, &mem_segs, list) {
                if (seg->start == start && seg->size == size)
                        break;
        }

        if (seg->start != start || seg->size != size)
                goto out;

        ret = 0;
        __remove_shared_memory(seg);
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

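/*
 * Register a new memory segment and add it to the 1:1 mapping, undoing the
 * registration if the mapping cannot be established.
 */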
int vmem_add_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);
        ret = -ENOMEM;
        seg = kzalloc(sizeof(*seg), GFP_KERNEL);
        if (!seg)
                goto out;
        seg->start = start;
        seg->size = size;

        ret = insert_memory_segment(seg);
        if (ret)
                goto out_free;

        ret = vmem_add_mem(start, size, 0);
        if (ret)
                goto out_remove;
        goto out;

out_remove:
        __remove_shared_memory(seg);
out_free:
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

/*
 * Map the whole physical memory into the kernel's virtual address space
 * (identity mapping). Enough space is reserved in the vmalloc area for
 * vmemmap so that additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
        unsigned long ro_start, ro_end;
        struct memblock_region *reg;
        phys_addr_t start, end;

        ro_start = PFN_ALIGN((unsigned long)&_stext);
        ro_end = (unsigned long)&_eshared & PAGE_MASK;
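        /* The kernel image from _stext up to _eshared is mapped read-only;
         * all remaining memory is mapped read-write. */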
        for_each_memblock(memory, reg) {
                start = reg->base;
                end = reg->base + reg->size;
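                /* Split each memblock range at the read-only boundaries so
                 * every piece gets the right protection bits. */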
                if (start >= ro_end || end <= ro_start)
                        vmem_add_mem(start, end - start, 0);
                else if (start >= ro_start && end <= ro_end)
                        vmem_add_mem(start, end - start, 1);
                else if (start >= ro_start) {
                        vmem_add_mem(start, ro_end - start, 1);
                        vmem_add_mem(ro_end, end - ro_end, 0);
                } else if (end < ro_end) {
                        vmem_add_mem(start, ro_start - start, 0);
                        vmem_add_mem(ro_start, end - ro_start, 1);
                } else {
                        vmem_add_mem(start, ro_start - start, 0);
                        vmem_add_mem(ro_start, ro_end - ro_start, 1);
                        vmem_add_mem(ro_end, end - ro_end, 0);
                }
        }
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
        struct memblock_region *reg;
        struct memory_segment *seg;

        mutex_lock(&vmem_mutex);
        for_each_memblock(memory, reg) {
                seg = kzalloc(sizeof(*seg), GFP_KERNEL);
                if (!seg)
                        panic("Out of memory...\n");
                seg->start = reg->base;
                seg->size = reg->size;
                insert_memory_segment(seg);
        }
        mutex_unlock(&vmem_mutex);
        return 0;
}

core_initcall(vmem_convert_memory_chunk);