#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#define MAX_PATCH_LEN (255-1)
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
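
/*
 * Note: this macro expands to a bare braceless "if", so a DPRINTK()
 * placed inside an un-braced if/else at a call site can bind to the
 * wrong branch; keep call sites as standalone statements.
 */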
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1, GENERIC_NOP2, GENERIC_NOP3, GENERIC_NOP4,
	GENERIC_NOP5, GENERIC_NOP6, GENERIC_NOP7, GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
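
/*
 * Worked example: intel_nops[3] == intelnops + 1 + 2, i.e. it skips past
 * the 1-byte and 2-byte nops and points at the first byte of GENERIC_NOP3.
 * Index 0 is unused (NULL), and the extra [ASM_NOP_MAX+1] slot points at
 * the 5-byte atomic nop appended at the tail of the byte array.
 */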
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4,
	K8_NOP5, K8_NOP6, K8_NOP7, K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1, K7_NOP2, K7_NOP3, K7_NOP4,
	K7_NOP5, K7_NOP6, K7_NOP7, K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char __initconst_or_module p6nops[] =
{
	P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4,
	P6_NOP5, P6_NOP6, P6_NOP7, P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;
	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
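
/*
 * Note: arch_init_ideal_nops() is expected to run once on the boot CPU,
 * before apply_alternatives()/add_nops() below are used, so that padding
 * picks up the final ideal_nops selection rather than the safe default
 * above.
 */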
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
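
/*
 * Example: with ASM_NOP_MAX == 8, padding a 12-byte hole emits one 8-byte
 * nop followed by one 4-byte nop, instead of twelve single-byte nops;
 * fewer instructions for the decoder to chew through.
 */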
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
extern char __vsyscall_0;
void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite a previously scanned alternative
	 * code. Some kernel functions (e.g. memcpy, memset, etc) use this
	 * order to patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= NCAPINTS*32);
		if (!boot_cpu_has(a->cpuid))
			continue;

		memcpy(insnbuf, replacement, a->replacementlen);

		/* 0xe8 is a relative call; fix up the displacement. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5)
			*(s32 *)(insnbuf + 1) += replacement - instr;

		/* pad the remainder of the original slot with nops */
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);

#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END)
			instr = __va(instr - (u8 *)VSYSCALL_START +
				     (u8 *)__pa_symbol(&__vsyscall_0));
#endif
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}
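
/*
 * Call sites emit alt_instr records via the ALTERNATIVE() macro from
 * <asm/alternative.h>. A rough sketch of what such a site looks like
 * (instructions and feature bit shown for illustration only):
 *
 *	asm (ALTERNATIVE("call __sw_hweight32", "popcnt %1, %0",
 *			 X86_FEATURE_POPCNT) : "=r" (res) : "r" (w));
 *
 * The old instruction bytes stay in .text; the replacement bytes and the
 * relative offsets/lengths land in .altinstr_replacement and
 * .altinstructions, which is the table this loop consumes.
 */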
#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}
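
/*
 * How this works: each LOCK_PREFIX site records, in the .smp_locks
 * section, a 32-bit offset (relative to the entry's own address) that
 * points at the 0xf0 lock prefix byte. On UP the prefix is rewritten to
 * 0x3e, a DS segment override that these instructions silently ignore,
 * turning the atomic op into a cheaper non-atomic one; switching to SMP
 * puts the 0xf0 byte back.
 */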
struct smp_alt_module {
	/* the module owning these lock prefixes (NULL for core kernel) */
	struct module		*mod;
	char			*name;

	/* ptrs to lock prefixes */
	const s32		*locks;
	const s32		*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8			*text;
	u8			*text_end;

	struct list_head	next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}
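
/*
 * Typical caller sketch (see module_finalize() in
 * arch/x86/kernel/module.c): the module loader registers a module's
 * .smp_locks and .text sections here, roughly:
 *
 *	alternatives_smp_module_add(me, me->name,
 *				    locks->sh_addr,
 *				    locks->sh_addr + locks->sh_size,
 *				    text->sh_addr,
 *				    text->sh_addr + text->sh_size);
 */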
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}
bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because they force JIT-based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}
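
/*
 * Called from the CPU hotplug paths: bringing a second CPU online
 * switches to SMP code via alternatives_smp_switch(1), and offlining
 * down to a single CPU can switch back to UP code.
 * skip_smp_alternatives lets suspend/resume bypass the patching while
 * CPUs are bounced.
 */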
/* Return 1 if the address range is reserved for SMP-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
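
/*
 * kprobes is the main consumer of alternatives_text_reserved(): a probe
 * must not be planted on a byte that this code may later rewrite.
 */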
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the to-be-patched code.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}
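
/*
 * alternative_instructions() runs once during boot, while only the boot
 * CPU is online; that single-CPU window is what makes the non-atomic
 * text_poke_early() path below safe to use here.
 */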
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	/* map the target page(s) writable at a fixmap alias and patch there */
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
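
/*
 * Minimal usage sketch (values illustrative): replace one byte with an
 * int3 breakpoint, the way kprobes arms a probe.
 *
 *	unsigned char int3 = 0xcc;
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(addr, &int3, 1);
 *	mutex_unlock(&text_mutex);
 */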
/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
	struct text_poke_param *params;
	int nparams;
};

static int __kprobes stop_machine_text_poke(void *data)
{
	struct text_poke_params *tpp = data;
	struct text_poke_param *p;
	int i;

	if (atomic_dec_and_test(&stop_machine_first)) {
		/* exactly one CPU wins the race and does the patching */
		for (i = 0; i < tpp->nparams; i++) {
			p = &tpp->params[i];
			text_poke(p->addr, p->opcode, p->len);
		}
		smp_wmb();	/* Make sure other cpus see that this has run */
		wrote_text = 1;
	} else {
		/* everyone else spins until the writer is done */
		while (!wrote_text)
			cpu_relax();
		smp_mb();	/* Load wrote_text before following execution */
	}

	for (i = 0; i < tpp->nparams; i++) {
		p = &tpp->params[i];
		flush_icache_range((unsigned long)p->addr,
				   (unsigned long)p->addr + p->len);
	}
	/*
	 * Intel Architecture Software Developer's Manual section 7.1.3
	 * specifies that a core serializing instruction such as "cpuid"
	 * should be executed on _each_ core before the new instruction is
	 * made visible.
	 */
	sync_core();
	return 0;
}
/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instruction by using stop_machine() on SMP. This allows
 * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying
 * should be allowed, since stop_machine() does _not_ protect code against
 * NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
	struct text_poke_params tpp;
	struct text_poke_param p;

	p.addr = addr;
	p.opcode = opcode;
	p.len = len;
	tpp.params = &p;
	tpp.nparams = 1;
	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	/* Use __stop_machine() because the caller already got online_cpus. */
	__stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
	return addr;
}
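
/*
 * The optimized-kprobes code is the expected caller here: it uses
 * text_poke_smp()/text_poke_smp_batch() to overwrite int3 probe sites
 * with 5-byte relative jumps once that is known to be safe.
 */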
/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy operation, it is better to aggregate
 * text_poke requests and apply them all in one pass when possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
	struct text_poke_params tpp = {.params = params, .nparams = n};

	atomic_set(&stop_machine_first, 1);
	wrote_text = 0;
	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
}
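
/*
 * Batch usage sketch (addresses and buffers are hypothetical): aggregate
 * two pokes so stop_machine() runs once instead of twice.
 *
 *	struct text_poke_param ps[2] = {
 *		{ .addr = addr0, .opcode = buf0, .len = 5 },
 *		{ .addr = addr1, .opcode = buf1, .len = 5 },
 *	};
 *
 *	get_online_cpus();
 *	mutex_lock(&text_mutex);
 *	text_poke_smp_batch(ps, 2);
 *	mutex_unlock(&text_mutex);
 *	put_online_cpus();
 */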