/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

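/* Each CPU collects pending userspace TLB flushes in its own tlb_batch.
 * The structure itself lives in the tlbflush header; roughly speaking it
 * records which mm is being flushed, how many addresses are queued, and
 * an array of up to TLB_BATCH_NR pending virtual addresses.
 */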
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

void flush_tlb_pending(void)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);

        if (tb->tlb_nr) {
                flush_tsb_user(tb);

                if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
#else
                        __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
                                            tb->tlb_nr, &tb->vaddrs[0]);
#endif
                }
                tb->tlb_nr = 0;
        }

        put_cpu_var(tlb_batch);
}
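
/* A sketch of the intended flow, assuming the usual flush_tlb_* wrappers
 * as callers (they live outside this file): each time the batch fills up,
 * or a caller needs the TLB/TSB to match the page tables right away, it
 * simply does
 *
 *      flush_tlb_pending();
 *
 * after which this CPU's tlb_batch.tlb_nr is zero again.
 */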

static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (exec)
                vaddr |= 0x1UL;

        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (nr == 0)
                tb->mm = mm;

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

        put_cpu_var(tlb_batch);
}
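
/* Since vaddr is page aligned, bit 0 is free, and tlb_batch_add_one()
 * reuses it to carry the "executable" flag alongside the address.  A
 * consumer of tb->vaddrs[] would recover the two pieces roughly as
 *
 *      bool exec = vaddrs[i] & 0x1UL;
 *      unsigned long va = vaddrs[i] & ~0x1UL;
 *
 * so that the low-level flush code can also demap the instruction-side
 * entry for executable mappings.
 */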

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm)
{
        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
                struct address_space *mapping;
                struct page *page;

                if (!pfn_valid(pfn))
                        goto no_cache_flush;

                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        goto no_cache_flush;

                /* A real file page? */
                mapping = page_mapping(page);
                if (!mapping)
                        goto no_cache_flush;

                paddr = (unsigned long) page_address(page);
                if ((paddr ^ vaddr) & (1 << 13))
                        flush_dcache_page_all(mm, page);
        }

no_cache_flush:
        if (!fullmm)
                tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}
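
/* About the (1 << 13) test above: sparc64 uses 8K base pages
 * (PAGE_SHIFT == 13), and on cpus with a virtually indexed L1 D-cache
 * the kernel alias (paddr) and the user mapping (vaddr) of the same page
 * fall into different cache colors exactly when they differ in bit 13.
 * Only in that case does a dirty page have to be flushed out of all
 * D-caches before its user translation is torn down.
 */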

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                               pmd_t pmd, bool exec)
{
        unsigned long end;
        pte_t *pte;

        pte = pte_offset_map(&pmd, vaddr);
        end = vaddr + HPAGE_SIZE;
        while (vaddr < end) {
                if (pte_val(*pte) & _PAGE_VALID)
                        tlb_batch_add_one(mm, vaddr, exec);
                pte++;
                vaddr += PAGE_SIZE;
        }
        pte_unmap(pte);
}
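
/* When the old PMD still pointed at a regular page table, every valid
 * PTE underneath it gets its own batch entry, so one call can queue up
 * to HPAGE_SIZE / PAGE_SIZE addresses (e.g. 512 with 8K base pages and
 * 4M huge pages), draining via flush_tlb_pending() whenever the batch
 * fills up along the way.
 */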

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        pmd_t orig = *pmdp;

        *pmdp = pmd;

        if (mm == &init_mm)
                return;

        if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
                if (pmd_val(pmd) & PMD_ISHUGE)
                        mm->context.huge_pte_count++;
                else
                        mm->context.huge_pte_count--;

                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
                 * and thus we'll end up doing a GFP_KERNEL allocation
                 * in an atomic context.
                 *
                 * Instead, we let the first TLB miss on a hugepage
                 * take care of this.
                 */
        }

        if (!pmd_none(orig)) {
                bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

                addr &= HPAGE_MASK;
                if (pmd_val(orig) & PMD_ISHUGE)
                        tlb_batch_add_one(mm, addr, exec);
                else
                        tlb_batch_pmd_scan(mm, addr, orig, exec);
        }
}
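
/* Note that it is the old PMD value, not the new one, which decides what
 * gets flushed: only translations for the previous mapping can still be
 * sitting in the TLB or TSB.  If the old entry was a huge page, a single
 * batch entry covers it; if it was a pointer to a page table, the whole
 * range is walked by tlb_batch_pmd_scan() above.
 */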

void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) mm->pmd_huge_pte);
        mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = mm->pmd_huge_pte;
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                mm->pmd_huge_pte = NULL;
        else {
                mm->pmd_huge_pte = (pgtable_t) lh->next;
                list_del(lh);
        }
        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}
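
/* The deposit/withdraw pair above keeps pre-allocated page tables for
 * huge PMDs chained through a struct list_head overlaid on the page
 * table itself.  The two list pointers occupy the first two 8-byte PTE
 * slots, which is why pgtable_trans_huge_withdraw() clears
 * pte_val(pgtable[0]) and pte_val(pgtable[1]) before handing the table
 * back for reuse.
 */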
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */