author    Linus Torvalds <torvalds@linux-foundation.org>  2022-03-25 10:21:20 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-03-25 10:21:20 -0700
commit    29c8c18363eee546d19bd95daa1e33c64ee74414 (patch)
tree      39245c8e255f1bd8547c964cb886f7699dd986e1 /include/linux/kasan.h
parent    Merge tag 'riscv-for-linus-5.18-mw0' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent    selftests: kselftest framework: provide "finished" helper (diff)
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
 "This is the material which was staged after willystuff in linux-next.

  Subsystems affected by this patch series: mm (debug, selftests,
  pagecache, thp, rmap, migration, kasan, hugetlb, pagemap, madvise),
  and selftests"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (113 commits)
  selftests: kselftest framework: provide "finished" helper
  mm: madvise: MADV_DONTNEED_LOCKED
  mm: fix race between MADV_FREE reclaim and blkdev direct IO read
  mm: generalize ARCH_HAS_FILTER_PGPROT
  mm: unmap_mapping_range_tree() with i_mmap_rwsem shared
  mm: warn on deleting redirtied only if accounted
  mm/huge_memory: remove stale locking logic from __split_huge_pmd()
  mm/huge_memory: remove stale page_trans_huge_mapcount()
  mm/swapfile: remove stale reuse_swap_page()
  mm/khugepaged: remove reuse_swap_page() usage
  mm/huge_memory: streamline COW logic in do_huge_pmd_wp_page()
  mm: streamline COW logic in do_swap_page()
  mm: slightly clarify KSM logic in do_swap_page()
  mm: optimize do_wp_page() for fresh pages in local LRU pagevecs
  mm: optimize do_wp_page() for exclusive pages in the swapcache
  mm/huge_memory: make is_transparent_hugepage() static
  userfaultfd/selftests: enable hugetlb remap and remove event testing
  selftests/vm: add hugetlb madvise MADV_DONTNEED MADV_REMOVE test
  mm: enable MADV_DONTNEED for hugetlb mappings
  kasan: disable LOCKDEP when printing reports
  ...
Diffstat (limited to 'include/linux/kasan.h')
-rw-r--r--  include/linux/kasan.h | 106
1 file changed, 62 insertions(+), 44 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b6a93261c92a..ceebcb9de7bf 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -19,13 +19,15 @@ struct task_struct;
#include <linux/linkage.h>
#include <asm/kasan.h>
-/* kasan_data struct is used in KUnit tests for KASAN expected failures */
-struct kunit_kasan_expectation {
- bool report_found;
-};
-
#endif
+typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+
+#define KASAN_VMALLOC_NONE 0x00u
+#define KASAN_VMALLOC_INIT 0x01u
+#define KASAN_VMALLOC_VM_ALLOC 0x02u
+#define KASAN_VMALLOC_PROT_NORMAL 0x04u
+
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#include <linux/pgtable.h>
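
The new kasan_vmalloc_flags_t follows the kernel's usual __bitwise flag idiom: a dedicated integer typedef plus power-of-two masks, so sparse can flag a plain integer passed where a flag word is expected, while the masks still combine with | and test with &. Below is a minimal, compilable userspace sketch of the same pattern; the demo_* names are invented for illustration, and the per-flag comments are assumptions about how the series uses each bit.

#include <stdio.h>

/* Stand-in for the kernel's __bitwise typedef; without sparse it is an
 * ordinary unsigned int. */
typedef unsigned int demo_vmalloc_flags_t;

#define DEMO_VMALLOC_NONE        0x00u
#define DEMO_VMALLOC_INIT        0x01u	/* assumed: zero-initialize the area */
#define DEMO_VMALLOC_VM_ALLOC    0x02u	/* assumed: memory from vmalloc() proper */
#define DEMO_VMALLOC_PROT_NORMAL 0x04u	/* assumed: normal page protections */

int main(void)
{
	/* Powers of two combine with | and are tested with &. */
	demo_vmalloc_flags_t flags = DEMO_VMALLOC_VM_ALLOC |
				     DEMO_VMALLOC_PROT_NORMAL;

	if (flags & DEMO_VMALLOC_VM_ALLOC)
		printf("allocation flagged as vmalloc()-backed\n");
	return 0;
}
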
@@ -84,25 +86,8 @@ static inline void kasan_disable_current(void) {}
#ifdef CONFIG_KASAN_HW_TAGS
-void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
-void kasan_free_pages(struct page *page, unsigned int order);
-
#else /* CONFIG_KASAN_HW_TAGS */
-static __always_inline void kasan_alloc_pages(struct page *page,
- unsigned int order, gfp_t flags)
-{
- /* Only available for integrated init. */
- BUILD_BUG();
-}
-
-static __always_inline void kasan_free_pages(struct page *page,
- unsigned int order)
-{
- /* Only available for integrated init. */
- BUILD_BUG();
-}
-
#endif /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_has_integrated_init(void)
@@ -282,10 +267,6 @@ static __always_inline bool kasan_check_byte(const void *addr)
return true;
}
-
-bool kasan_save_enable_multi_shot(void);
-void kasan_restore_multi_shot(bool enabled);
-
#else /* CONFIG_KASAN */
static inline slab_flags_t kasan_never_merge(void)
@@ -414,34 +395,71 @@ static inline void kasan_init_hw_tags(void) { }
#ifdef CONFIG_KASAN_VMALLOC
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
-void kasan_poison_vmalloc(const void *start, unsigned long size);
-void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
-void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) { }
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_vmalloc(start, size, flags);
+ return (void *)start;
+}
+
+void __kasan_poison_vmalloc(const void *start, unsigned long size);
+static __always_inline void kasan_poison_vmalloc(const void *start,
+ unsigned long size)
+{
+ if (kasan_enabled())
+ __kasan_poison_vmalloc(start, size);
+}
#else /* CONFIG_KASAN_VMALLOC */
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
unsigned long size)
{
return 0;
}
-
-static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
-{ }
-static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
-{ }
static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end) {}
+ unsigned long free_region_end) { }
-static inline void kasan_populate_early_vm_area_shadow(void *start,
- unsigned long size)
+static inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ return (void *)start;
+}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
#endif /* CONFIG_KASAN_VMALLOC */
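
The vmalloc rework above switches kasan_unpoison_vmalloc() and kasan_poison_vmalloc() to the two-level pattern this header already uses elsewhere: an out-of-line __kasan_*() implementation behind a static inline wrapper that checks kasan_enabled() first, so the call disappears behind a patched-out branch when KASAN is disabled at runtime. Unpoisoning also returns a pointer now, because HW_TAGS mode may hand back a retagged address. A compilable userspace sketch of that shape follows; all demo_* names are invented, and the real kasan_enabled() check is a static key, not a bool.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for kasan_enabled(); the kernel uses a boot-patched static key. */
static bool demo_enabled = true;

/* Out-of-line slow path, in the spirit of __kasan_unpoison_vmalloc().
 * Returning a pointer matters because HW_TAGS may retag the area. */
static void *demo___unpoison(const void *start, unsigned long size)
{
	printf("unpoison %lu bytes\n", size);
	return (void *)start;
}

/* Inline fast path mirroring kasan_unpoison_vmalloc(): when the feature
 * is off, skip the call and hand the pointer back unchanged. */
static inline void *demo_unpoison(const void *start, unsigned long size)
{
	if (demo_enabled)
		return demo___unpoison(start, size);
	return (void *)start;
}

int main(void)
{
	char buf[64];
	char *p = demo_unpoison(buf, sizeof(buf));

	return p == buf ? 0 : 1;
}
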
@@ -450,17 +468,17 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
!defined(CONFIG_KASAN_VMALLOC)
/*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
+ * These functions allocate and free shadow memory for kernel modules.
+ * They are only required when KASAN_VMALLOC is not supported, as otherwise
+ * shadow memory is allocated by the generic vmalloc handlers.
*/
-int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
-void kasan_free_shadow(const struct vm_struct *vm);
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
+void kasan_free_module_shadow(const struct vm_struct *vm);
#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
+static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
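
The renamed module-shadow helpers keep their calling convention: kasan_alloc_module_shadow() returns 0 on success or a negative error, and a caller that has just allocated a module mapping is expected to unwind it when shadow allocation fails. Here is a hedged, compilable sketch of that call-site shape; the demo_* stand-ins are invented for illustration, and the real callers live in each architecture's module_alloc().

#include <stdlib.h>

/* Hypothetical stand-ins for vmalloc()/vfree(). */
static void *demo_vmalloc(size_t size) { return malloc(size); }
static void demo_vfree(void *p) { free(p); }

/* Models kasan_alloc_module_shadow(): 0 on success, negative on error. */
static int demo_alloc_shadow(void *addr, size_t size)
{
	(void)addr;
	(void)size;
	return 0;
}

static void *demo_module_alloc(size_t size)
{
	void *p = demo_vmalloc(size);

	/* On shadow failure, free the mapping and report failure: the
	 * allocate-then-back-with-shadow unwind order described above. */
	if (p && demo_alloc_shadow(p, size) < 0) {
		demo_vfree(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	void *mod = demo_module_alloc(4096);

	demo_vfree(mod);	/* free(NULL) is a no-op, so this is safe */
	return mod ? 0 : 1;
}
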