/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);
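
	/*
	 * A size that is not a multiple of KASAN_SHADOW_SCALE_SIZE leaves a
	 * partially accessible granule at the end: its shadow byte stores
	 * the number of accessible leading bytes, e.g. unpoisoning 13 bytes
	 * sets the second shadow byte to 5.
	 */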
	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
	__kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending on
 * the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
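
	/*
	 * A non-zero shadow byte is either a negative poison marker or the
	 * number (1..7) of accessible bytes at the start of this granule;
	 * the access is bad if its offset within the granule reaches that
	 * number, which the signed comparison below checks.
	 */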
	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If single shadow byte covers 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If single shadow byte covers 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If single shadow byte covers 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
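
/*
 * For an arbitrary-size access the whole shadow range must be zero, except
 * that the very last shadow byte may hold a 1..7 prefix length as long as
 * the access's final byte still falls inside that prefix.
 */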
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region(unsigned long addr,
						size_t size, bool write)
{
	if (unlikely(size == 0))
		return;
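
	/*
	 * Addresses below the start of the shadow-covered region have no
	 * shadow memory at all, so report them without consulting it.
	 */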
	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, _RET_IP_);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, _RET_IP_);
}

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);
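
/*
 * KASAN provides its own mem*() routines so that bulk copies and fills are
 * range-checked through __asan_loadN/__asan_storeN before being handed to
 * the real, uninstrumented __mem*() implementations.
 */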

#undef memset
void *memset(void *addr, int c, size_t len)
{
	__asan_storeN((unsigned long)addr, len);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
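/* For example, a 40-byte object gets a 16-byte redzone, a 1 KiB object 128 bytes. */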
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;

	/* Make sure the adjusted size is still less than
	 * KMALLOC_MAX_CACHE_SIZE.
	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
	 * to skip it for SLUB when it starts using kasan_cache_create().
	 */
	if (*size > KMALLOC_MAX_CACHE_SIZE -
	    sizeof(struct kasan_alloc_meta) -
	    sizeof(struct kasan_free_meta))
		return;

	*flags |= SLAB_KASAN;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_CACHE_SIZE,
		    max(*size,
			cache->object_size +
			optimal_redzone(cache->object_size)));
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_INIT;
	}
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}
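
/*
 * Cut stack traces at the first irq entry function: everything below it
 * belongs to the interrupted task and would make otherwise identical
 * allocation stacks look unique, bloating the stack depot.
 */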
static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}
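
/*
 * With CONFIG_SLAB a freed object is kept poisoned in the quarantine for a
 * while instead of being returned to the allocator right away, which widens
 * the window in which use-after-free accesses are caught.
 */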
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
#ifdef CONFIG_SLAB
	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	if (likely(cache->flags & SLAB_KASAN)) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		struct kasan_free_meta *free_info =
			get_free_info(cache, object);

		switch (alloc_info->state) {
		case KASAN_STATE_ALLOC:
			alloc_info->state = KASAN_STATE_QUARANTINE;
			quarantine_put(free_info, cache);
			set_track(&free_info->track, GFP_NOWAIT);
			kasan_poison_slab_free(cache, object);
			return true;
		case KASAN_STATE_QUARANTINE:
		case KASAN_STATE_FREE:
			pr_err("Double free");
			dump_stack();
			break;
		default:
			break;
		}
	}
	return false;
#else
	kasan_poison_slab_free(cache, object);
	return false;
#endif
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);
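
	/*
	 * Mark the object's first 'size' bytes accessible and poison the
	 * rest of the allocated slot (rounded to shadow granularity) as a
	 * right redzone.
	 */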
	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_ALLOC;
		alloc_info->alloc_size = size;
		set_track(&alloc_info->track, flags);
	}
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (flags & __GFP_RECLAIM)
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}
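
/*
 * Shadow for the module area is allocated on demand: loading a module maps a
 * zeroed vmalloc region covering its shadow, which is released again through
 * kasan_free_shadow() when the module mapping is torn down.
 */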
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
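
/*
 * Instrumented code calls __asan_loadX()/__asan_storeX() before every
 * memory access of the given size; the _noabort variants emitted by newer
 * compilers are plain aliases, since the kernel always continues after
 * reporting.
 */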
#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, false);		\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, true);		\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* The compiler emits calls to this before noreturn functions; nothing to do here. */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif