/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
{
        return x;
}

u64 _paravirt_ident_64(u64 x)
{
        return x;
}
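
/*
 * These identity functions back the PTE_IDENT pv_mmu_ops entries further
 * down: on bare hardware pte_val()/make_pte() and friends are plain
 * pass-throughs, so paravirt_patch_default() can recognize them and patch
 * in paravirt_patch_ident_32/64 output instead of a call.
 */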

void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

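/*
 * Layout of a patched direct call/jmp: a one-byte opcode (0xe8 or 0xe9)
 * followed by a 32-bit displacement relative to the end of the 5-byte
 * instruction; packed, so sizeof(struct branch) is exactly 5.
 */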
struct branch {
        unsigned char opcode;
        u32 delta;
} __attribute__((packed));

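/*
 * Write a 5-byte rel32 CALL to @target into @insnbuf for the patch site
 * at @addr.  The call is skipped when the target clobbers more registers
 * than the site can tolerate, or when the site is shorter than 5 bytes.
 */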
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (tgt_clobbers & ~site_clobbers)
                return len;     /* target would clobber too much for this site */
        if (len < 5)
                return len;     /* call too long for patch site */

        b->opcode = 0xe8; /* call */
        b->delta = delta;
        BUILD_BUG_ON(sizeof(*b) != 5);

        return 5;
}

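/*
 * Same idea for a 5-byte rel32 JMP to @target: used below for operations
 * such as iret and the sysret/sysexit paths, which do not return to the
 * patch site and therefore want a jump rather than a call.
 */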
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (len < 5)
                return len;     /* jmp too long for patch site */

        b->opcode = 0xe9;       /* jmp */
        b->delta = delta;

        return 5;
}

/* Neat trick to map patch type back to the call within the
 * corresponding structure. */
static void *get_call_destination(u8 type)
{
        struct paravirt_patch_template tmpl = {
                .pv_init_ops = pv_init_ops,
                .pv_time_ops = pv_time_ops,
                .pv_cpu_ops = pv_cpu_ops,
                .pv_irq_ops = pv_irq_ops,
                .pv_apic_ops = pv_apic_ops,
                .pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
                .pv_lock_ops = pv_lock_ops,
#endif
        };
        return *((void **)&tmpl + type);
}

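/*
 * Default patcher: depending on what the op points at, patch in a ud2a
 * (missing op), patch in nothing (nop), inline the 32/64-bit identity
 * move, emit a direct jmp (for the iret/sysret-style tail operations),
 * or fall back to a direct call, assuming the target may clobber any
 * caller-save register.
 */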
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len)
{
        void *opfunc = get_call_destination(type);
        unsigned ret;

        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
        else if (opfunc == _paravirt_nop)
                ret = 0;

        /* identity functions just return their single argument */
        else if (opfunc == _paravirt_ident_32)
                ret = paravirt_patch_ident_32(insnbuf, len);
        else if (opfunc == _paravirt_ident_64)
                ret = paravirt_patch_ident_64(insnbuf, len);

        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
#ifdef CONFIG_X86_32
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
#endif
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
                ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
        else
                /* Otherwise call the function; assume target could
                   clobber any caller-save reg */
                ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
                                          addr, clobbers, len);

        return ret;
}

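/*
 * Copy a pre-built replacement instruction sequence [start, end) into the
 * patch buffer.  If the sequence is missing or does not fit in the site,
 * nothing is copied and the full site length is returned instead.
 */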
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end)
{
        unsigned insn_len = end - start;

        if (insn_len > len || start == NULL)
                insn_len = len;
        else
                memcpy(insnbuf, start, insn_len);

        return insn_len;
}

static void native_flush_tlb(void)
{
        __native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
        __native_flush_tlb_global();
}

static void native_flush_tlb_single(unsigned long addr)
{
        __native_flush_tlb_single(addr);
}

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

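/*
 * Bare hardware never has CPU time stolen by a hypervisor, so the native
 * steal clock simply reads as zero.
 */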
static u64 native_steal_clock(int cpu)
{
        return 0;
}

/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);
extern void native_usergs_sysret32(void);
extern void native_usergs_sysret64(void);

static struct resource reserve_ioports = {
        .start = 0,
        .end = IO_SPACE_LIMIT,
        .name = "paravirt-ioport",
        .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
        return request_resource(&ioport_resource, &reserve_ioports);
}

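/*
 * Lazy mode batching: while a CPU is in lazy MMU or lazy CPU mode, a
 * hypervisor backend may queue up page-table or context-switch updates
 * and flush them in one go when the mode is left.  The per-CPU variable
 * below tracks which lazy mode (if any) the CPU is in; nesting is not
 * allowed, hence the BUG_ON checks in enter_lazy()/leave_lazy().
 */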
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

        this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

        this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
        enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
        leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
        preempt_disable();

        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }

        preempt_enable();
}

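/*
 * Context switch: leave lazy MMU mode (remembering via TIF_LAZY_MMU_UPDATES
 * that the outgoing task was in it) and run the switch itself in lazy CPU
 * mode; the flag re-enters lazy MMU mode for the task when it is switched
 * back in.
 */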
void paravirt_start_context_switch(struct task_struct *prev)
{
        BUG_ON(preemptible());

        if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
        }
        enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
        BUG_ON(preemptible());

        leave_lazy(PARAVIRT_LAZY_CPU);

        if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
                arch_enter_lazy_mmu_mode();
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
        if (in_interrupt())
                return PARAVIRT_LAZY_NONE;

        return this_cpu_read(paravirt_lazy_mode);
}

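/*
 * What follows are the baseline "bare hardware" instances of the pv_ops
 * structures: each hook either calls the corresponding native_* helper
 * directly or is a paravirt_nop when native hardware needs nothing done.
 * Hypervisor backends such as Xen override individual entries at boot.
 */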
struct pv_info pv_info = {
        .name = "bare hardware",
        .paravirt_enabled = 0,
        .kernel_rpl = 0,
        .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
        .extra_user_64bit_cs = __USER_CS,
#endif
};

struct pv_init_ops pv_init_ops = {
        .patch = native_patch,
};

struct pv_time_ops pv_time_ops = {
        .sched_clock = native_sched_clock,
        .steal_clock = native_steal_clock,
};

__visible struct pv_irq_ops pv_irq_ops = {
        .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
        .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
        .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
        .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
#ifdef CONFIG_X86_64
        .adjust_exception_frame = paravirt_nop,
#endif
};

__visible struct pv_cpu_ops pv_cpu_ops = {
        .cpuid = native_cpuid,
        .get_debugreg = native_get_debugreg,
        .set_debugreg = native_set_debugreg,
        .clts = native_clts,
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
        .write_cr8 = native_write_cr8,
#endif
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr_safe,
        .write_msr = native_write_msr_safe,
        .read_pmc = native_read_pmc,
        .load_tr_desc = native_load_tr_desc,
        .set_ldt = native_set_ldt,
        .load_gdt = native_load_gdt,
        .load_idt = native_load_idt,
        .store_idt = native_store_idt,
        .store_tr = native_store_tr,
        .load_tls = native_load_tls,
#ifdef CONFIG_X86_64
        .load_gs_index = native_load_gs_index,
#endif
        .write_ldt_entry = native_write_ldt_entry,
        .write_gdt_entry = native_write_gdt_entry,
        .write_idt_entry = native_write_idt_entry,

        .alloc_ldt = paravirt_nop,
        .free_ldt = paravirt_nop,

        .load_sp0 = native_load_sp0,

#if defined(CONFIG_X86_32)
        .irq_enable_sysexit = native_irq_enable_sysexit,
#endif
#ifdef CONFIG_X86_64
#ifdef CONFIG_IA32_EMULATION
        .usergs_sysret32 = native_usergs_sysret32,
#endif
        .usergs_sysret64 = native_usergs_sysret64,
#endif
        .iret = native_iret,
        .swapgs = native_swapgs,

        .set_iopl_mask = native_set_iopl_mask,
        .io_delay = native_io_delay,

        .start_context_switch = paravirt_nop,
        .end_context_switch = paravirt_nop,
};

/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);

struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
        .startup_ipi_hook = paravirt_nop,
#endif
};

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif

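/*
 * Native MMU ops: page-table reads and writes go straight to the hardware
 * helpers, the allocation/release hooks and lazy-mode callbacks are nops,
 * and the pte/pmd/pud/pgd value conversions are the identity (PTE_IDENT).
 */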
struct pv_mmu_ops pv_mmu_ops = {

        .read_cr2 = native_read_cr2,
        .write_cr2 = native_write_cr2,
        .read_cr3 = native_read_cr3,
        .write_cr3 = native_write_cr3,

        .flush_tlb_user = native_flush_tlb,
        .flush_tlb_kernel = native_flush_tlb_global,
        .flush_tlb_single = native_flush_tlb_single,
        .flush_tlb_others = native_flush_tlb_others,

        .pgd_alloc = __paravirt_pgd_alloc,
        .pgd_free = paravirt_nop,

        .alloc_pte = paravirt_nop,
        .alloc_pmd = paravirt_nop,
        .alloc_pud = paravirt_nop,
        .release_pte = paravirt_nop,
        .release_pmd = paravirt_nop,
        .release_pud = paravirt_nop,

        .set_pte = native_set_pte,
        .set_pte_at = native_set_pte_at,
        .set_pmd = native_set_pmd,
        .set_pmd_at = native_set_pmd_at,
        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,
        .pmd_update = paravirt_nop,
        .pmd_update_defer = paravirt_nop,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        .set_pte_atomic = native_set_pte_atomic,
        .pte_clear = native_pte_clear,
        .pmd_clear = native_pmd_clear,
#endif
        .set_pud = native_set_pud,

        .pmd_val = PTE_IDENT,
        .make_pmd = PTE_IDENT,

#if CONFIG_PGTABLE_LEVELS == 4
        .pud_val = PTE_IDENT,
        .make_pud = PTE_IDENT,

        .set_pgd = native_set_pgd,
#endif
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */

        .pte_val = PTE_IDENT,
        .pgd_val = PTE_IDENT,

        .make_pte = PTE_IDENT,
        .make_pgd = PTE_IDENT,

        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
        .activate_mm = paravirt_nop,

        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
                .flush = paravirt_nop,
        },

        .set_fixmap = native_set_fixmap,
};

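/*
 * The ops structures are referenced from paravirt call sites that end up
 * inlined into modules as well, so they need to be exported.
 */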
EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL    (pv_cpu_ops);
EXPORT_SYMBOL    (pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_apic_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL    (pv_irq_ops);