#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *	More scalable flush, from Andi Kleen
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP
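
/*
 * Payload handed to the remote flush handler (flush_tlb_func).
 * flush_end == TLB_FLUSH_ALL requests a full flush; otherwise the
 * range [flush_start, flush_end) is flushed one page at a time.
 */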
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}
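
/*
 * Send a flush request to the CPUs in @cpumask.  end == 0 is taken to
 * mean a single page starting at @start; end == TLB_FLUSH_ALL requests
 * a full flush.  On UV systems, uv_flush_tlb_others() may perform the
 * flush in hardware and returns the CPUs that still need an IPI.
 */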
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
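
/*
 * Flush the entire TLB for current->mm: a full local flush plus a
 * TLB_FLUSH_ALL request to every other CPU that has this mm loaded.
 */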
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,300 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
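
/*
 * Flush a user address range of @mm.  Ranges of at most
 * tlb_single_page_flush_ceiling pages (and not VM_HUGETLB) are flushed
 * page by page with INVLPG; anything larger falls back to a full local
 * flush.  Other CPUs in mm_cpumask(mm) are flushed via flush_tlb_others().
 */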
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();
		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());
		/* Synchronize with switch_mm. */
		smp_mb();
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush range by one by one 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}
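
/*
 * Flush one user page: INVLPG locally if @vma's mm is the current mm,
 * plus a single-page flush request to any other CPU that has it loaded.
 */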
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());
			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
	preempt_enable();
}
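
/*
 * IPI handler for flush_tlb_all(): flush everything on this CPU and
 * drop back to the kernel page tables if we were in lazy TLB mode.
 */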
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
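
/* IPI handler that flushes a range of kernel addresses one page at a time. */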
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush range by one by one 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}
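
/*
 * Flush a kernel address range on all CPUs.  The same ceiling as for
 * user-space flushes decides between per-page INVLPG and a full flush.
 */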
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as user space task's flush, a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
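
/*
 * debugfs knob for tuning tlb_single_page_flush_ceiling at runtime.
 * The file lives under arch_debugfs_dir, which is normally
 * /sys/kernel/debug/x86/ when debugfs is mounted in the usual place:
 *
 *   cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */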
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);

#endif /* CONFIG_SMP */