Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      3
-rw-r--r--  lib/Kconfig.debug               41
-rw-r--r--  lib/Kconfig.kasan              168
-rw-r--r--  lib/Kconfig.kcsan                4
-rw-r--r--  lib/Kconfig.ubsan                2
-rw-r--r--  lib/Makefile                     2
-rw-r--r--  lib/bug.c                       15
-rw-r--r--  lib/crypto/Kconfig               1
-rw-r--r--  lib/debugobjects.c               5
-rw-r--r--  lib/dim/net_dim.c               44
-rw-r--r--  lib/dump_stack.c                 4
-rw-r--r--  lib/fault-inject.c               3
-rw-r--r--  lib/irq_poll.c                   8
-rw-r--r--  lib/kstrtox.c                    6
-rw-r--r--  lib/kunit/Makefile               1
-rw-r--r--  lib/kunit/debugfs.c              2
-rw-r--r--  lib/kunit/executor.c            32
-rw-r--r--  lib/kunit/executor_test.c        4
-rw-r--r--  lib/kunit/kunit-example-test.c  16
-rw-r--r--  lib/kunit/kunit-test.c          37
-rw-r--r--  lib/kunit/resource.c            79
-rw-r--r--  lib/kunit/test.c               145
-rw-r--r--  lib/list-test.c                397
-rw-r--r--  lib/lockref.c                    9
-rw-r--r--  lib/nmi_backtrace.c              4
-rw-r--r--  lib/percpu-refcount.c            1
-rw-r--r--  lib/polynomial.c               108
-rw-r--r--  lib/random32.c                 347
-rw-r--r--  lib/siphash.c                   32
-rw-r--r--  lib/slub_kunit.c                10
-rw-r--r--  lib/stackdepot.c                67
-rw-r--r--  lib/strncpy_from_user.c          2
-rw-r--r--  lib/strnlen_user.c               2
-rw-r--r--  lib/test_bpf.c                 315
-rw-r--r--  lib/test_kasan.c                 2
-rw-r--r--  lib/test_sysctl.c               32
-rw-r--r--  lib/vsprintf.c                  67
37 files changed, 1293 insertions, 724 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 087e06b4cdfd..6a843639814f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -737,3 +737,6 @@ config PLDMFW
config ASN1_ENCODER
tristate
+
+config POLYNOMIAL
+ tristate
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8fa08100dbd8..b1e506f1d328 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -485,24 +485,25 @@ config FRAME_POINTER
larger and slower, but it gives very useful debugging information
in case of kernel bugs. (precise oopses/stacktraces/warnings)
+config OBJTOOL
+ bool
+
config STACK_VALIDATION
bool "Compile-time stack metadata validation"
- depends on HAVE_STACK_VALIDATION
+ depends on HAVE_STACK_VALIDATION && UNWINDER_FRAME_POINTER
+ select OBJTOOL
default n
help
- Add compile-time checks to validate stack metadata, including frame
- pointers (if CONFIG_FRAME_POINTER is enabled). This helps ensure
- that runtime stack traces are more reliable.
-
- This is also a prerequisite for generation of ORC unwind data, which
- is needed for CONFIG_UNWINDER_ORC.
+ Validate frame pointer rules at compile-time. This helps ensure that
+ runtime stack traces are more reliable.
For more information, see
tools/objtool/Documentation/stack-validation.txt.
-config VMLINUX_VALIDATION
+config NOINSTR_VALIDATION
bool
- depends on STACK_VALIDATION && DEBUG_ENTRY
+ depends on HAVE_NOINSTR_VALIDATION && DEBUG_ENTRY
+ select OBJTOOL
default y
config VMLINUX_MAP
@@ -709,6 +710,7 @@ config DEBUG_SLAB
config SLUB_DEBUG_ON
bool "SLUB debugging on by default"
depends on SLUB && SLUB_DEBUG
+ select STACKDEPOT_ALWAYS_INIT if STACKTRACE_SUPPORT
default n
help
Boot with debugging on by default. SLUB boots by default with
@@ -1595,8 +1597,7 @@ config WARN_ALL_UNSEEDED_RANDOM
so architecture maintainers really need to do what they can
to get the CRNG seeded sooner after the system is booted.
However, since users cannot do anything actionable to
- address this, by default the kernel will issue only a single
- warning for the first use of unseeded randomness.
+ address this, by default this option is disabled.
Say Y here if you want to receive warnings for all uses of
unseeded randomness. This will be of use primarily for
@@ -2014,10 +2015,11 @@ config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
- depends on !ARCH_WANTS_NO_INSTR || STACK_VALIDATION || \
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
select DEBUG_FS
select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
+ select OBJTOOL if HAVE_NOINSTR_HACK
help
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
@@ -2119,10 +2121,11 @@ config TEST_DIV64
If unsure, say N.
config KPROBES_SANITY_TEST
- tristate "Kprobes sanity tests"
+ tristate "Kprobes sanity tests" if !KUNIT_ALL_TESTS
depends on DEBUG_KERNEL
depends on KPROBES
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This option provides for testing basic kprobes functionality on
boot. Samples of kprobe and kretprobe are inserted and
@@ -2396,8 +2399,9 @@ config TEST_SYSCTL
If unsure, say N.
config BITFIELD_KUNIT
- tristate "KUnit test bitfield functions at runtime"
+ tristate "KUnit test bitfield functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
Enable this option to test the bitfield functions at boot.
@@ -2431,8 +2435,9 @@ config HASH_KUNIT_TEST
optimized versions. If unsure, say N.
config RESOURCE_KUNIT_TEST
- tristate "KUnit test for resource API"
+ tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the resource API unit test.
Tests the logic of API provided by resource.c and ioport.h.
@@ -2485,8 +2490,9 @@ config LINEAR_RANGES_TEST
If unsure, say N.
config CMDLINE_KUNIT_TEST
- tristate "KUnit test for cmdline API"
+ tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the cmdline API unit test.
Tests the logic of API provided by cmdline.c.
@@ -2496,8 +2502,9 @@ config CMDLINE_KUNIT_TEST
If unsure, say N.
config BITS_TEST
- tristate "KUnit test for bits.h"
+ tristate "KUnit test for bits.h" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the bits unit test.
Tests the logic of macros defined in bits.h.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 1f3e620188a2..f0973da583e0 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+
# This config refers to the generic KASAN mode.
config HAVE_ARCH_KASAN
bool
@@ -15,9 +16,8 @@ config HAVE_ARCH_KASAN_VMALLOC
config ARCH_DISABLE_KASAN_INLINE
bool
help
- An architecture might not support inline instrumentation.
- When this option is selected, inline and stack instrumentation are
- disabled.
+ Disables both inline and stack instrumentation. Selected by
+ architectures that do not support these instrumentation types.
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
@@ -26,13 +26,13 @@ config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
# This option is only required for software KASAN modes.
-# Old GCC versions don't have proper support for no_sanitize_address.
+# Old GCC versions do not have proper support for no_sanitize_address.
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details.
config CC_HAS_WORKING_NOSANITIZE_ADDRESS
def_bool !CC_IS_GCC || GCC_VERSION >= 80300
menuconfig KASAN
- bool "KASAN: runtime memory debugger"
+ bool "KASAN: dynamic memory safety error detector"
depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
@@ -40,10 +40,13 @@ menuconfig KASAN
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select STACKDEPOT_ALWAYS_INIT
help
- Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
- designed to find out-of-bounds accesses and use-after-free bugs.
+ Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
+ error detector designed to find out-of-bounds and use-after-free bugs.
+
See Documentation/dev-tools/kasan.rst for details.
+ For better error reports, also enable CONFIG_STACKTRACE.
+
if KASAN
choice
@@ -51,75 +54,71 @@ choice
default KASAN_GENERIC
help
KASAN has three modes:
- 1. generic KASAN (similar to userspace ASan,
- x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC),
- 2. software tag-based KASAN (arm64 only, based on software
- memory tagging (similar to userspace HWASan), enabled with
- CONFIG_KASAN_SW_TAGS), and
- 3. hardware tag-based KASAN (arm64 only, based on hardware
- memory tagging, enabled with CONFIG_KASAN_HW_TAGS).
- All KASAN modes are strictly debugging features.
+ 1. Generic KASAN (supported by many architectures, enabled with
+ CONFIG_KASAN_GENERIC, similar to userspace ASan),
+ 2. Software Tag-Based KASAN (arm64 only, based on software memory
+ tagging, enabled with CONFIG_KASAN_SW_TAGS, similar to userspace
+ HWASan), and
+ 3. Hardware Tag-Based KASAN (arm64 only, based on hardware memory
+ tagging, enabled with CONFIG_KASAN_HW_TAGS).
- For better error reports enable CONFIG_STACKTRACE.
+ See Documentation/dev-tools/kasan.rst for details about each mode.
config KASAN_GENERIC
- bool "Generic mode"
+ bool "Generic KASAN"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
- Enables generic KASAN mode.
+ Enables Generic KASAN.
- This mode is supported in both GCC and Clang. With GCC it requires
- version 8.3.0 or later. Any supported Clang version is compatible,
- but detection of out-of-bounds accesses for global variables is
- supported only since Clang 11.
+ Requires GCC 8.3.0+ or Clang.
- This mode consumes about 1/8th of available memory at kernel start
- and introduces an overhead of ~x1.5 for the rest of the allocations.
+ Consumes about 1/8th of available memory at kernel start and adds an
+ overhead of ~50% for dynamic allocations.
The performance slowdown is ~x3.
- Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
- (the resulting kernel does not boot).
+ (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
config KASAN_SW_TAGS
- bool "Software tag-based mode"
+ bool "Software Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
- Enables software tag-based KASAN mode.
+ Enables Software Tag-Based KASAN.
- This mode require software memory tagging support in the form of
- HWASan-like compiler instrumentation.
+ Requires GCC 11+ or Clang.
- Currently this mode is only implemented for arm64 CPUs and relies on
- Top Byte Ignore. This mode requires Clang.
+ Supported only on arm64 CPUs and relies on Top Byte Ignore.
- This mode consumes about 1/16th of available memory at kernel start
- and introduces an overhead of ~20% for the rest of the allocations.
- This mode may potentially introduce problems relating to pointer
- casting and comparison, as it embeds tags into the top byte of each
- pointer.
+ Consumes about 1/16th of available memory at kernel start and
+ adds an overhead of ~20% for dynamic allocations.
- Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
- (the resulting kernel does not boot).
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
+
+ (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
config KASAN_HW_TAGS
- bool "Hardware tag-based mode"
+ bool "Hardware Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_HW_TAGS
depends on SLUB
help
- Enables hardware tag-based KASAN mode.
+ Enables Hardware Tag-Based KASAN.
+
+ Requires GCC 10+ or Clang 12+.
- This mode requires hardware memory tagging support, and can be used
- by any architecture that provides it.
+ Supported only on arm64 CPUs starting from ARMv8.5 and relies on
+ Memory Tagging Extension and Top Byte Ignore.
- Currently this mode is only implemented for arm64 CPUs starting from
- ARMv8.5 and relies on Memory Tagging Extension and Top Byte Ignore.
+ Consumes about 1/32nd of available memory.
+
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
endchoice
@@ -131,83 +130,80 @@ choice
config KASAN_OUTLINE
bool "Outline instrumentation"
help
- Before every memory access compiler insert function call
- __asan_load*/__asan_store*. These functions performs check
- of shadow memory. This is slower than inline instrumentation,
- however it doesn't bloat size of kernel's .text section so
- much as inline does.
+ Makes the compiler insert function calls that check whether the memory
+ is accessible before each memory access. Slower than KASAN_INLINE, but
+ does not bloat the size of the kernel's .text section so much.
config KASAN_INLINE
bool "Inline instrumentation"
depends on !ARCH_DISABLE_KASAN_INLINE
help
- Compiler directly inserts code checking shadow memory before
- memory accesses. This is faster than outline (in some workloads
- it gives about x2 boost over outline instrumentation), but
- make kernel's .text size much bigger.
+ Makes the compiler directly insert memory accessibility checks before
+ each memory access. Faster than KASAN_OUTLINE (gives ~x2 boost for
+ some workloads), but makes the kernel's .text size much bigger.
endchoice
config KASAN_STACK
- bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ bool "Stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
depends on KASAN_GENERIC || KASAN_SW_TAGS
depends on !ARCH_DISABLE_KASAN_INLINE
default y if CC_IS_GCC
help
- The LLVM stack address sanitizer has a know problem that
- causes excessive stack usage in a lot of functions, see
- https://bugs.llvm.org/show_bug.cgi?id=38809
- Disabling asan-stack makes it safe to run kernels build
- with clang-8 with KASAN enabled, though it loses some of
- the functionality.
- This feature is always disabled when compile-testing with clang
- to avoid cluttering the output in stack overflow warnings,
- but clang users can still enable it for builds without
- CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
- to use and enabled by default.
- If the architecture disables inline instrumentation, stack
- instrumentation is also disabled as it adds inline-style
- instrumentation that is run unconditionally.
+ Disables stack instrumentation and thus KASAN's ability to detect
+ out-of-bounds bugs in stack variables.
+
+ With Clang, stack instrumentation has a problem that causes excessive
+ stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ with Clang, this option is deemed unsafe.
+
+ This option is always disabled when compile-testing with Clang to
+ avoid cluttering the log with stack overflow warnings.
+
+ With GCC, enabling stack instrumentation is assumed to be safe.
+
+ If the architecture disables inline instrumentation via
+ ARCH_DISABLE_KASAN_INLINE, stack instrumentation gets disabled
+ as well, as it adds inline-style instrumentation that is run
+ unconditionally.
config KASAN_TAGS_IDENTIFY
- bool "Enable memory corruption identification"
+ bool "Memory corruption type identification"
depends on KASAN_SW_TAGS || KASAN_HW_TAGS
help
- This option enables best-effort identification of bug type
- (use-after-free or out-of-bounds) at the cost of increased
- memory consumption.
+ Enables best-effort identification of the bug types (use-after-free
+ or out-of-bounds) at the cost of increased memory consumption.
+ Only applicable for the tag-based KASAN modes.
config KASAN_VMALLOC
bool "Check accesses to vmalloc allocations"
depends on HAVE_ARCH_KASAN_VMALLOC
help
- This mode makes KASAN check accesses to vmalloc allocations for
- validity.
+ Makes KASAN check the validity of accesses to vmalloc allocations.
- With software KASAN modes, checking is done for all types of vmalloc
- allocations. Enabling this option leads to higher memory usage.
+ With software KASAN modes, all types of vmalloc allocations are
+ checked. Enabling this option leads to higher memory usage.
- With hardware tag-based KASAN, only VM_ALLOC mappings are checked.
- There is no additional memory usage.
+ With Hardware Tag-Based KASAN, only non-executable VM_ALLOC mappings
+ are checked. There is no additional memory usage.
config KASAN_KUNIT_TEST
tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
depends on KASAN && KUNIT
default KUNIT_ALL_TESTS
help
- This is a KUnit test suite doing various nasty things like
- out of bounds and use after free accesses. It is useful for testing
- kernel debugging features like KASAN.
+ A KUnit-based KASAN test suite. Triggers different kinds of
+ out-of-bounds and use-after-free accesses. Useful for testing whether
+ KASAN can detect certain bug types.
For more information on KUnit and unit tests in general, please refer
- to the KUnit documentation in Documentation/dev-tools/kunit.
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
config KASAN_MODULE_TEST
tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
depends on m && KASAN && !KASAN_HW_TAGS
help
- This is a part of the KASAN test suite that is incompatible with
- KUnit. Currently includes tests that do bad copy_from/to_user
- accesses.
+ A part of the KASAN test suite that is not integrated with KUnit.
+ Incompatible with Hardware Tag-Based KASAN.
endif # KASAN
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index de022445fbba..47a693c45864 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -187,7 +187,9 @@ config KCSAN_WEAK_MEMORY
# We can either let objtool nop __tsan_func_{entry,exit}() and builtin
# atomics instrumentation in .noinstr.text, or use a compiler that can
# implement __no_kcsan to really remove all instrumentation.
- depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
+ CC_IS_GCC || CLANG_VERSION >= 140000
+ select OBJTOOL if HAVE_NOINSTR_HACK
help
Enable support for modeling a subset of weak memory, which allows
detecting a subset of data races due to missing memory barriers.
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index f3c57ed51838..c4fe15d38b60 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -94,7 +94,7 @@ config UBSAN_UNREACHABLE
bool "Perform checking for unreachable code"
# objtool already handles unreachable checking and gets angry about
# seeing UBSan instrumentation located in unreachable places.
- depends on !STACK_VALIDATION
+ depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || X86_SMAP))
depends on $(cc-option,-fsanitize=unreachable)
help
This option enables -fsanitize=unreachable which checks for control
diff --git a/lib/Makefile b/lib/Makefile
index 6b9ffc1bd1ee..89fcae891361 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -263,6 +263,8 @@ obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+obj-$(CONFIG_POLYNOMIAL) += polynomial.o
+
# stackdepot.c should not be instrumented or call instrumented functions.
# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
# file.
diff --git a/lib/bug.c b/lib/bug.c
index 45a0584f6541..c223a2575b72 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -6,8 +6,7 @@
CONFIG_BUG - emit BUG traps. Nothing happens without this.
CONFIG_GENERIC_BUG - enable this code.
- CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
- the containing struct bug_entry for bug_addr and file.
+ CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers for bug_addr and file
CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -53,10 +52,10 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- return bug->bug_addr;
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
#else
- return (unsigned long)bug + bug->bug_addr_disp;
+ return bug->bug_addr;
#endif
}
@@ -131,10 +130,10 @@ void bug_get_file_line(struct bug_entry *bug, const char **file,
unsigned int *line)
{
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- *file = bug->file;
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ *file = (const char *)&bug->file_disp + bug->file_disp;
#else
- *file = (const char *)bug + bug->file_disp;
+ *file = bug->file;
#endif
*line = bug->line;
#else
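For readers unfamiliar with the self-relative pointer scheme used in the hunks above, here is a minimal, self-contained userspace sketch of the idea; the struct and helper names are illustrative, not the kernel's. A 32-bit signed displacement is stored relative to the address of the field itself, which keeps each bug-table entry small on 64-bit kernels and position independent:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: store a pointer as a 32-bit offset from the field itself. */
    struct rel_entry {
            int32_t addr_disp;
    };

    static const char msg[] = "lib/bug.c";
    static struct rel_entry entry;

    static void rel_set(struct rel_entry *e, const void *target)
    {
            /* Both objects live in the same binary, so the offset fits in 32 bits. */
            e->addr_disp = (int32_t)((intptr_t)target - (intptr_t)&e->addr_disp);
    }

    static const void *rel_get(const struct rel_entry *e)
    {
            return (const char *)&e->addr_disp + e->addr_disp;
    }

    int main(void)
    {
            rel_set(&entry, msg);
            printf("%s\n", (const char *)rel_get(&entry));  /* prints "lib/bug.c" */
            return 0;
    }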
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 379a66d7f504..a80b8c4dc2cf 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -85,6 +85,7 @@ config CRYPTO_LIB_POLY1305_RSIZE
default 11 if X86_64
default 9 if ARM || ARM64
default 1
+ depends on CRYPTO_LIB_POLY1305
config CRYPTO_ARCH_HAVE_LIB_POLY1305
tristate
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 6946f8e204e3..337d797a7141 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic infrastructure for lifetime debugging of objects.
*
- * Started by Thomas Gleixner
- *
* Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
- *
- * For licencing details see kernel-base/COPYING
*/
#define pr_fmt(fmt) "ODEBUG: " fmt
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 06811d866775..53f6b9c6e936 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -12,41 +12,41 @@
* Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
*/
#define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
#define NET_DIM_RX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_RX_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
+ {.usec = 2, .pkts = 256,}, \
+ {.usec = 8, .pkts = 128,}, \
+ {.usec = 16, .pkts = 64,}, \
+ {.usec = 32, .pkts = 64,}, \
+ {.usec = 64, .pkts = 64,} \
}
#define NET_DIM_TX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_TX_CQE_PROFILES { \
- {5, 128}, \
- {8, 64}, \
- {16, 32}, \
- {32, 32}, \
- {64, 32} \
+ {.usec = 5, .pkts = 128,}, \
+ {.usec = 8, .pkts = 64,}, \
+ {.usec = 16, .pkts = 32,}, \
+ {.usec = 32, .pkts = 32,}, \
+ {.usec = 64, .pkts = 32,} \
}
static const struct dim_cq_moder
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 6b7f1bf6715d..83471e81501a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
- printk_cpu_lock_irqsave(flags);
+ printk_cpu_sync_get_irqsave(flags);
__dump_stack(log_lvl);
- printk_cpu_unlock_irqrestore(flags);
+ printk_cpu_sync_put_irqrestore(flags);
}
EXPORT_SYMBOL(dump_stack_lvl);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index ce12621b4275..423784d9c058 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
+ if (attr->no_warn)
+ return;
+
if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, "
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2f17b488d58e..2d5329a42105 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -188,14 +188,18 @@ EXPORT_SYMBOL(irq_poll_init);
static int irq_poll_cpu_dead(unsigned int cpu)
{
/*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
+ * If a CPU goes away, splice its entries to the current CPU and
+ * set the POLL softirq bit. The local_bh_disable()/enable() pair
+ * ensures that it is handled. Otherwise the current CPU could
+ * reach idle with the POLL softirq pending.
*/
+ local_bh_disable();
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
+ local_bh_enable();
return 0;
}
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 886510d248e5..08c14019841a 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -340,7 +340,7 @@ EXPORT_SYMBOL(kstrtos8);
* @s: input string
* @res: result
*
- * This routine returns 0 iff the first character is one of 'Yy1Nn0', or
+ * This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or
* [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
* pointed to by res is updated upon finding a match.
*/
@@ -353,11 +353,15 @@ int kstrtobool(const char *s, bool *res)
switch (s[0]) {
case 'y':
case 'Y':
+ case 't':
+ case 'T':
case '1':
*res = true;
return 0;
case 'n':
case 'N':
+ case 'f':
+ case 'F':
case '0':
*res = false;
return 0;
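The hunk above extends kstrtobool() to also accept 't'/'T' and 'f'/'F'. As a minimal usage sketch (the caller below is hypothetical, only kstrtobool() itself is from the kernel):

    #include <linux/kstrtox.h>
    #include <linux/printk.h>

    /* Hypothetical caller: parse a sysfs/module-parameter string into a bool. */
    static int parse_enable_flag(const char *buf, bool *enabled)
    {
            int ret = kstrtobool(buf, enabled); /* accepts y/Y/t/T/1, n/N/f/F/0, on/off */

            if (ret) /* -EINVAL for anything else */
                    pr_warn("invalid boolean value: %s\n", buf);
            return ret;
    }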
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index c49f4ffb6273..29aff6562b42 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_KUNIT) += kunit.o
kunit-objs += test.o \
+ resource.o \
string-stream.o \
assert.o \
try-catch.o \
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index b71db0abc12b..1048ef1b8d6e 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -52,7 +52,7 @@ static void debugfs_print_result(struct seq_file *seq,
static int debugfs_print_results(struct seq_file *seq, void *v)
{
struct kunit_suite *suite = (struct kunit_suite *)seq->private;
- bool success = kunit_suite_has_succeeded(suite);
+ enum kunit_status success = kunit_suite_has_succeeded(suite);
struct kunit_case *test_case;
if (!suite || !suite->log)
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 22640c9ee819..96f96e42ce06 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -71,9 +71,13 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob)
/* Use memcpy to workaround copy->name being const. */
copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
memcpy(copy, suite, sizeof(*copy));
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered)
+ return ERR_PTR(-ENOMEM);
n = 0;
kunit_suite_for_each_test_case(suite, test_case) {
@@ -106,14 +110,16 @@ kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL);
if (!filtered)
- return NULL;
+ return ERR_PTR(-ENOMEM);
n = 0;
for (i = 0; subsuite[i] != NULL; ++i) {
if (!glob_match(filter->suite_glob, subsuite[i]->name))
continue;
filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob);
- if (filtered_suite)
+ if (IS_ERR(filtered_suite))
+ return ERR_CAST(filtered_suite);
+ else if (filtered_suite)
filtered[n++] = filtered_suite;
}
filtered[n] = NULL;
@@ -146,7 +152,8 @@ static void kunit_free_suite_set(struct suite_set suite_set)
}
static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
- const char *filter_glob)
+ const char *filter_glob,
+ int *err)
{
int i;
struct kunit_suite * const **copy, * const *filtered_subsuite;
@@ -166,6 +173,10 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
for (i = 0; i < max; ++i) {
filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter);
+ if (IS_ERR(filtered_subsuite)) {
+ *err = PTR_ERR(filtered_subsuite);
+ return filtered;
+ }
if (filtered_subsuite)
*copy++ = filtered_subsuite;
}
@@ -236,9 +247,15 @@ int kunit_run_all_tests(void)
.start = __kunit_suites_start,
.end = __kunit_suites_end,
};
+ int err = 0;
- if (filter_glob_param)
- suite_set = kunit_filter_suites(&suite_set, filter_glob_param);
+ if (filter_glob_param) {
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param, &err);
+ if (err) {
+ pr_err("kunit executor: error filtering suites: %d\n", err);
+ goto out;
+ }
+ }
if (!action_param)
kunit_exec_run_tests(&suite_set);
@@ -251,9 +268,10 @@ int kunit_run_all_tests(void)
kunit_free_suite_set(suite_set);
}
- kunit_handle_shutdown();
- return 0;
+out:
+ kunit_handle_shutdown();
+ return err;
}
#if IS_BUILTIN(CONFIG_KUNIT_TEST)
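The executor changes above report allocation failures through the usual ERR_PTR()/IS_ERR()/PTR_ERR() idiom instead of returning NULL. A minimal sketch of that pattern, with hypothetical names, for readers unfamiliar with it:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo {
            int x;
    };

    /* Hypothetical producer: NULL is no longer used to signal failure. */
    static struct foo *make_foo(void)
    {
            struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return ERR_PTR(-ENOMEM); /* encode the errno in the pointer */
            return f;
    }

    static int use_foo(void)
    {
            struct foo *f = make_foo();

            if (IS_ERR(f))
                    return PTR_ERR(f); /* decode it back into -ENOMEM etc. */
            /* ... use f ... */
            kfree(f);
            return 0;
    }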
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index 4ed57fd94e42..eac6ff480273 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -137,14 +137,16 @@ static void filter_suites_test(struct kunit *test)
.end = suites + 2,
};
struct suite_set filtered = {.start = NULL, .end = NULL};
+ int err = 0;
/* Emulate two files, each having one suite */
subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases);
subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
/* Filter out suite1 */
- filtered = kunit_filter_suites(&suite_set, "suite0");
+ filtered = kunit_filter_suites(&suite_set, "suite0", &err);
kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */
+ KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 4bbf37c04eba..f8fe582c9e36 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -41,6 +41,17 @@ static int example_test_init(struct kunit *test)
}
/*
+ * This is run once before all test cases in the suite.
+ * See the comment on example_test_suite for more information.
+ */
+static int example_test_init_suite(struct kunit_suite *suite)
+{
+ kunit_info(suite, "initializing suite\n");
+
+ return 0;
+}
+
+/*
* This test should always be skipped.
*/
static void example_skip_test(struct kunit *test)
@@ -91,6 +102,8 @@ static void example_all_expect_macros_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test);
KUNIT_EXPECT_PTR_EQ(test, NULL, NULL);
KUNIT_EXPECT_PTR_NE(test, test, NULL);
+ KUNIT_EXPECT_NULL(test, NULL);
+ KUNIT_EXPECT_NOT_NULL(test, test);
/* String assertions */
KUNIT_EXPECT_STREQ(test, "hi", "hi");
@@ -140,17 +153,20 @@ static struct kunit_case example_test_cases[] = {
* may be specified which runs after every test case and can be used to for
* cleanup. For clarity, running tests in a test suite would behave as follows:
*
+ * suite.suite_init(suite);
* suite.init(test);
* suite.test_case[0](test);
* suite.exit(test);
* suite.init(test);
* suite.test_case[1](test);
* suite.exit(test);
+ * suite.suite_exit(suite);
* ...;
*/
static struct kunit_suite example_test_suite = {
.name = "example",
.init = example_test_init,
+ .suite_init = example_test_init_suite,
.test_cases = example_test_cases,
};
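For context, a suite can register both a suite_init and a suite_exit callback. A minimal sketch of a complete suite using the new hooks (all names below are illustrative, not part of this patch):

    #include <kunit/test.h>

    static int my_suite_init(struct kunit_suite *suite)
    {
            /* Runs once before all test cases; a non-zero return fails the suite. */
            return 0;
    }

    static void my_suite_exit(struct kunit_suite *suite)
    {
            /* Runs once after all test cases have finished. */
    }

    static void my_first_test(struct kunit *test)
    {
            KUNIT_EXPECT_EQ(test, 1 + 1, 2);
    }

    static struct kunit_case my_test_cases[] = {
            KUNIT_CASE(my_first_test),
            {}
    };

    static struct kunit_suite my_test_suite = {
            .name = "my-suite",
            .suite_init = my_suite_init,
            .suite_exit = my_suite_exit,
            .test_cases = my_test_cases,
    };
    kunit_test_suite(my_test_suite);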
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 555601d17f79..13d0bd8b07a9 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -190,6 +190,40 @@ static void kunit_resource_test_destroy_resource(struct kunit *test)
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
+static void kunit_resource_test_remove_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res = kunit_alloc_and_get_resource(
+ &ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ /* The resource is in the list */
+ KUNIT_EXPECT_FALSE(test, list_empty(&ctx->test.resources));
+
+ /* Remove the resource. The pointer is still valid, but it can't be
+ * found.
+ */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* We haven't been freed yet. */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Removing the resource multiple times is valid. */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* Despite having been removed twice (from only one reference), the
+ * resource still has not been freed.
+ */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Free the resource. */
+ kunit_put_resource(res);
+ KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+}
+
static void kunit_resource_test_cleanup_resources(struct kunit *test)
{
int i;
@@ -387,6 +421,7 @@ static struct kunit_case kunit_resource_test_cases[] = {
KUNIT_CASE(kunit_resource_test_init_resources),
KUNIT_CASE(kunit_resource_test_alloc_resource),
KUNIT_CASE(kunit_resource_test_destroy_resource),
+ KUNIT_CASE(kunit_resource_test_remove_resource),
KUNIT_CASE(kunit_resource_test_cleanup_resources),
KUNIT_CASE(kunit_resource_test_proper_free_ordering),
KUNIT_CASE(kunit_resource_test_static),
@@ -435,7 +470,7 @@ static void kunit_log_test(struct kunit *test)
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(suite.log, "along with this."));
#else
- KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
+ KUNIT_EXPECT_NULL(test, test->log);
#endif
}
diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c
new file mode 100644
index 000000000000..c414df922f34
--- /dev/null
+++ b/lib/kunit/resource.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit resource API for test managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/kref.h>
+
+/*
+ * Used for static resources and when a kunit_resource * has been created by
+ * kunit_alloc_resource(). When an init function is supplied, @data is passed
+ * into the init function; otherwise, we simply set the resource data field to
+ * the data value passed in. Doesn't initialize res->should_kfree.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ res->free = free;
+ kref_init(&res->refcount);
+
+ if (init) {
+ ret = init(res, data);
+ if (ret)
+ return ret;
+ } else {
+ res->data = data;
+ }
+
+ spin_lock_irqsave(&test->lock, flags);
+ list_add_tail(&res->node, &test->resources);
+ /* refcount for list is established by kref_init() */
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kunit_add_resource);
+
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
+{
+ unsigned long flags;
+ bool was_linked;
+
+ spin_lock_irqsave(&test->lock, flags);
+ was_linked = !list_empty(&res->node);
+ list_del_init(&res->node);
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ if (was_linked)
+ kunit_put_resource(res);
+}
+EXPORT_SYMBOL_GPL(kunit_remove_resource);
+
+int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res = kunit_find_resource(test, match,
+ match_data);
+
+ if (!res)
+ return -ENOENT;
+
+ kunit_remove_resource(test, res);
+
+ /* We have a reference also via _find(); drop it. */
+ kunit_put_resource(res);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_destroy_resource);
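As a usage sketch of the resource API that this new file now hosts, a test can register a statically allocated resource and let KUnit release it when the test ends; the struct and callback names below are illustrative:

    #include <kunit/resource.h>
    #include <kunit/test.h>

    struct my_ctx {
            struct kunit_resource res; /* embedded, statically managed */
            int value;
    };

    static struct my_ctx ctx = { .value = 42 };

    static void my_ctx_free(struct kunit_resource *res)
    {
            /* Called when the last reference is dropped, e.g. at test exit. */
    }

    static void example_static_resource_test(struct kunit *test)
    {
            /* No init callback, so &ctx is stored directly in res.data. */
            kunit_add_resource(test, NULL, my_ctx_free, &ctx.res, &ctx);
            KUNIT_EXPECT_EQ(test, ((struct my_ctx *)ctx.res.data)->value, 42);
    }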
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 3bca3bf5c15b..a5053a07409f 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -6,10 +6,10 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
+#include <kunit/resource.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/kernel.h>
-#include <linux/kref.h>
#include <linux/moduleparam.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
@@ -134,7 +134,7 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
-static void kunit_print_subtest_start(struct kunit_suite *suite)
+static void kunit_print_suite_start(struct kunit_suite *suite)
{
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
suite->name);
@@ -179,6 +179,9 @@ enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
const struct kunit_case *test_case;
enum kunit_status status = KUNIT_SKIPPED;
+ if (suite->suite_init_err)
+ return KUNIT_FAILURE;
+
kunit_suite_for_each_test_case(suite, test_case) {
if (test_case->status == KUNIT_FAILURE)
return KUNIT_FAILURE;
@@ -192,7 +195,7 @@ EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
static size_t kunit_suite_counter = 1;
-static void kunit_print_subtest_end(struct kunit_suite *suite)
+static void kunit_print_suite_end(struct kunit_suite *suite)
{
kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
@@ -241,7 +244,7 @@ static void kunit_print_string_stream(struct kunit *test,
}
static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
- enum kunit_assert_type type, struct kunit_assert *assert,
+ enum kunit_assert_type type, const struct kunit_assert *assert,
const struct va_format *message)
{
struct string_stream *stream;
@@ -281,7 +284,7 @@ static void __noreturn kunit_abort(struct kunit *test)
void kunit_do_failed_assertion(struct kunit *test,
const struct kunit_loc *loc,
enum kunit_assert_type type,
- struct kunit_assert *assert,
+ const struct kunit_assert *assert,
const char *fmt, ...)
{
va_list args;
@@ -498,7 +501,16 @@ int kunit_run_tests(struct kunit_suite *suite)
struct kunit_result_stats suite_stats = { 0 };
struct kunit_result_stats total_stats = { 0 };
- kunit_print_subtest_start(suite);
+ if (suite->suite_init) {
+ suite->suite_init_err = suite->suite_init(suite);
+ if (suite->suite_init_err) {
+ kunit_err(suite, KUNIT_SUBTEST_INDENT
+ "# failed to initialize (%d)", suite->suite_init_err);
+ goto suite_end;
+ }
+ }
+
+ kunit_print_suite_start(suite);
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
@@ -551,8 +563,12 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_accumulate_stats(&total_stats, param_stats);
}
+ if (suite->suite_exit)
+ suite->suite_exit(suite);
+
kunit_print_suite_stats(suite, suite_stats, total_stats);
- kunit_print_subtest_end(suite);
+suite_end:
+ kunit_print_suite_end(suite);
return 0;
}
@@ -562,6 +578,7 @@ static void kunit_init_suite(struct kunit_suite *suite)
{
kunit_debugfs_create_suite(suite);
suite->status_comment[0] = '\0';
+ suite->suite_init_err = 0;
}
int __kunit_test_suites_init(struct kunit_suite * const * const suites)
@@ -592,120 +609,6 @@ void __kunit_test_suites_exit(struct kunit_suite **suites)
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
-/*
- * Used for static resources and when a kunit_resource * has been created by
- * kunit_alloc_resource(). When an init function is supplied, @data is passed
- * into the init function; otherwise, we simply set the resource data field to
- * the data value passed in.
- */
-int kunit_add_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- void *data)
-{
- int ret = 0;
- unsigned long flags;
-
- res->free = free;
- kref_init(&res->refcount);
-
- if (init) {
- ret = init(res, data);
- if (ret)
- return ret;
- } else {
- res->data = data;
- }
-
- spin_lock_irqsave(&test->lock, flags);
- list_add_tail(&res->node, &test->resources);
- /* refcount for list is established by kref_init() */
- spin_unlock_irqrestore(&test->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(kunit_add_resource);
-
-int kunit_add_named_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- const char *name,
- void *data)
-{
- struct kunit_resource *existing;
-
- if (!name)
- return -EINVAL;
-
- existing = kunit_find_named_resource(test, name);
- if (existing) {
- kunit_put_resource(existing);
- return -EEXIST;
- }
-
- res->name = name;
-
- return kunit_add_resource(test, init, free, res, data);
-}
-EXPORT_SYMBOL_GPL(kunit_add_named_resource);
-
-struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *data)
-{
- struct kunit_resource *res;
- int ret;
-
- res = kzalloc(sizeof(*res), internal_gfp);
- if (!res)
- return NULL;
-
- ret = kunit_add_resource(test, init, free, res, data);
- if (!ret) {
- /*
- * bump refcount for get; kunit_resource_put() should be called
- * when done.
- */
- kunit_get_resource(res);
- return res;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource);
-
-void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&test->lock, flags);
- list_del(&res->node);
- spin_unlock_irqrestore(&test->lock, flags);
- kunit_put_resource(res);
-}
-EXPORT_SYMBOL_GPL(kunit_remove_resource);
-
-int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
- void *match_data)
-{
- struct kunit_resource *res = kunit_find_resource(test, match,
- match_data);
-
- if (!res)
- return -ENOENT;
-
- kunit_remove_resource(test, res);
-
- /* We have a reference also via _find(); drop it. */
- kunit_put_resource(res);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(kunit_destroy_resource);
-
struct kunit_kmalloc_array_params {
size_t n;
size_t size;
diff --git a/lib/list-test.c b/lib/list-test.c
index 035ef6597640..d374cf5d1a57 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -804,6 +804,401 @@ static struct kunit_suite list_test_module = {
.test_cases = list_test_cases,
};
-kunit_test_suites(&list_test_module);
+struct hlist_test_struct {
+ int data;
+ struct hlist_node list;
+};
+
+static void hlist_test_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct hlist_head list1 = HLIST_HEAD_INIT;
+ struct hlist_head list2;
+ HLIST_HEAD(list3);
+ struct hlist_head *list4;
+ struct hlist_head *list5;
+
+ INIT_HLIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_HLIST_HEAD(list4);
+
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_HLIST_HEAD(list5);
+
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list3));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list4));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void hlist_test_unhashed(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+}
+
+/* Doesn't test concurrency guarantees */
+static void hlist_test_unhashed_lockless(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+}
+
+static void hlist_test_del(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+}
+
+static void hlist_test_del_init(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del_init(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+
+ /* a is now initialised */
+ KUNIT_EXPECT_PTR_EQ(test, a.next, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL);
+}
+
+/* Tests all three hlist_add_* functions */
+static void hlist_test_add(struct kunit *test)
+{
+ struct hlist_node a, b, c, d;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_head(&b, &list);
+ hlist_add_before(&c, &a);
+ hlist_add_behind(&d, &a);
+
+ /* should be [list] -> b -> c -> a -> d */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+
+ KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next));
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &c);
+
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next));
+ KUNIT_EXPECT_PTR_EQ(test, c.next, &a);
+
+ KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next));
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &d);
+}
+
+/* Tests both hlist_fake() and hlist_add_fake() */
+static void hlist_test_fake(struct kunit *test)
+{
+ struct hlist_node a;
+
+ INIT_HLIST_NODE(&a);
+
+ /* not fake after init */
+ KUNIT_EXPECT_FALSE(test, hlist_fake(&a));
+
+ hlist_add_fake(&a);
+
+ /* is now fake */
+ KUNIT_EXPECT_TRUE(test, hlist_fake(&a));
+}
+
+static void hlist_test_is_singular_node(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&a, &list);
+ KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&b, &list);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list));
+}
+
+static void hlist_test_empty(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ /* list starts off empty */
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+
+ hlist_add_head(&a, &list);
+
+ /* list is no longer empty */
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list));
+}
+
+static void hlist_test_move_list(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list1);
+ HLIST_HEAD(list2);
+
+ hlist_add_head(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ hlist_move_list(&list1, &list2);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list2));
+
+}
+
+static void hlist_test_entry(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry(&(test_struct.list),
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry_safe(&(test_struct.list),
+ struct hlist_test_struct, list));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL,
+ hlist_entry_safe((struct hlist_node *)NULL,
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_for_each(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+
+static void hlist_test_for_each_safe(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur, *n;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ hlist_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+static void hlist_test_for_each_entry(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void hlist_test_for_each_entry_continue(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ /* We skip the first (zero-th) entry. */
+ i = 1;
+
+ cur = &entries[0];
+ hlist_for_each_entry_continue(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was not visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 0);
+ /* The second (and presumably others), were. */
+ KUNIT_EXPECT_EQ(test, entries[1].data, 42);
+}
+
+static void hlist_test_for_each_entry_from(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ cur = &entries[0];
+ hlist_for_each_entry_from(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 42);
+}
+
+static void hlist_test_for_each_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ struct hlist_node *tmp_node;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry_safe(cur, tmp_node, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ hlist_del(&cur->list);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+
+static struct kunit_case hlist_test_cases[] = {
+ KUNIT_CASE(hlist_test_init),
+ KUNIT_CASE(hlist_test_unhashed),
+ KUNIT_CASE(hlist_test_unhashed_lockless),
+ KUNIT_CASE(hlist_test_del),
+ KUNIT_CASE(hlist_test_del_init),
+ KUNIT_CASE(hlist_test_add),
+ KUNIT_CASE(hlist_test_fake),
+ KUNIT_CASE(hlist_test_is_singular_node),
+ KUNIT_CASE(hlist_test_empty),
+ KUNIT_CASE(hlist_test_move_list),
+ KUNIT_CASE(hlist_test_entry),
+ KUNIT_CASE(hlist_test_entry_safe),
+ KUNIT_CASE(hlist_test_for_each),
+ KUNIT_CASE(hlist_test_for_each_safe),
+ KUNIT_CASE(hlist_test_for_each_entry),
+ KUNIT_CASE(hlist_test_for_each_entry_continue),
+ KUNIT_CASE(hlist_test_for_each_entry_from),
+ KUNIT_CASE(hlist_test_for_each_entry_safe),
+ {},
+};
+
+static struct kunit_suite hlist_test_module = {
+ .name = "hlist",
+ .test_cases = hlist_test_cases,
+};
+
+kunit_test_suites(&list_test_module, &hlist_test_module);
MODULE_LICENSE("GPL v2");
diff --git a/lib/lockref.c b/lib/lockref.c
index 5b34bbd3eba8..c6f0b183b937 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -14,12 +14,11 @@
BUILD_BUG_ON(sizeof(old) != 8); \
old.lock_count = READ_ONCE(lockref->lock_count); \
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
- struct lockref new = old, prev = old; \
+ struct lockref new = old; \
CODE \
- old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
- old.lock_count, \
- new.lock_count); \
- if (likely(old.lock_count == prev.lock_count)) { \
+ if (likely(try_cmpxchg64_relaxed(&lockref->lock_count, \
+ &old.lock_count, \
+ new.lock_count))) { \
SUCCESS; \
} \
if (!--retry) \
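The lockref change above replaces the cmpxchg64_relaxed()-then-compare sequence with try_cmpxchg64_relaxed(), which performs the exchange and, on failure, writes the freshly observed value back into the expected-value variable. A minimal sketch of the two styles on a plain int, purely illustrative:

    #include <linux/atomic.h>

    /* Old style: compare the returned old value by hand and reload on failure. */
    static void inc_old_style(int *p)
    {
            int old = READ_ONCE(*p);

            for (;;) {
                    int prev = cmpxchg(p, old, old + 1);

                    if (prev == old)
                            break;  /* exchange happened */
                    old = prev;     /* retry with the fresh value */
            }
    }

    /* New style: try_cmpxchg() refreshes 'old' on failure by itself. */
    static void inc_new_style(int *p)
    {
            int old = READ_ONCE(*p);

            do {
            } while (!try_cmpxchg(p, &old, old + 1));
    }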
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 199ab201d501..d01aec6ae15c 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
* Allow nested NMI backtraces while serializing
* against other CPUs.
*/
- printk_cpu_lock_irqsave(flags);
+ printk_cpu_sync_get_irqsave(flags);
if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
- printk_cpu_unlock_irqrestore(flags);
+ printk_cpu_sync_put_irqrestore(flags);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
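Both dump_stack.c and nmi_backtrace.c now use the renamed printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() pair. A minimal sketch of the pattern in a hypothetical caller, showing how a multi-line dump is kept contiguous against output from other CPUs:

    #include <linux/printk.h>

    static void dump_something(void)
    {
            unsigned long flags;

            /* Serialize this CPU's multi-line output against other CPUs. */
            printk_cpu_sync_get_irqsave(flags);
            pr_info("first line of the dump\n");
            pr_info("second line of the dump\n");
            printk_cpu_sync_put_irqrestore(flags);
    }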
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index af9302141bcf..e5c5315da274 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -76,6 +76,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
data = kzalloc(sizeof(*ref->data), gfp);
if (!data) {
free_percpu((void __percpu *)ref->percpu_count_ptr);
+ ref->percpu_count_ptr = 0;
return -ENOMEM;
}
diff --git a/lib/polynomial.c b/lib/polynomial.c
new file mode 100644
index 000000000000..66d383445fec
--- /dev/null
+++ b/lib/polynomial.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic polynomial calculation using integer coefficients.
+ *
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/polynomial.h>
+
+/*
+ * Originally this was part of drivers/hwmon/bt1-pvt.c.
+ * There the following conversion is used and should serve as an example here:
+ *
+ * The original translation formulae of the temperature (in degrees of Celsius)
+ * to PVT data and vice-versa are following:
+ *
+ * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) +
+ * 1.7204e2
+ * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) +
+ * 3.1020e-1*(N^1) - 4.838e1
+ *
+ * where T = [-48.380, 147.438]C and N = [0, 1023].
+ *
+ * They must be accordingly altered to be suitable for the integer arithmetics.
+ * The technique is called 'factor redistribution', which just makes sure the
+ * multiplications and divisions are made so to have a result of the operations
+ * within the integer numbers limit. In addition we need to translate the
+ * formulae to accept millidegrees of Celsius. Here is what they look like after
+ * the alterations:
+ *
+ * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T +
+ * 17204e2) / 1e4
+ * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D -
+ * 48380
+ * where T = [-48380, 147438] mC and N = [0, 1023].
+ *
+ * static const struct polynomial poly_temp_to_N = {
+ * .total_divider = 10000,
+ * .terms = {
+ * {4, 18322, 10000, 10000},
+ * {3, 2343, 10000, 10},
+ * {2, 87018, 10000, 10},
+ * {1, 39269, 1000, 1},
+ * {0, 1720400, 1, 1}
+ * }
+ * };
+ *
+ * static const struct polynomial poly_N_to_temp = {
+ * .total_divider = 1,
+ * .terms = {
+ * {4, -16743, 1000, 1},
+ * {3, 81542, 1000, 1},
+ * {2, -182010, 1000, 1},
+ * {1, 310200, 1000, 1},
+ * {0, -48380, 1, 1}
+ * }
+ * };
+ */
+
+/**
+ * polynomial_calc - calculate a polynomial using integer arithmetic
+ *
+ * @poly: pointer to the descriptor of the polynomial
+ * @data: input value of the polynomial
+ *
+ * Calculate the result of a polynomial using only integer arithmetic. For
+ * this to work without too much loss of precision the coefficients have to
+ * be altered. This is called factor redistribution.
+ *
+ * Returns the result of the polynomial calculation.
+ */
+long polynomial_calc(const struct polynomial *poly, long data)
+{
+ const struct polynomial_term *term = poly->terms;
+ long total_divider = poly->total_divider ?: 1;
+ long tmp, ret = 0;
+ int deg;
+
+ /*
+ * Here is the polynomial calculation function, which performs the
+ * redistributed terms calculations. It's pretty straightforward.
+ * We walk over each degree term up to the free one, and perform
+ * the redistributed multiplication of the term coefficient, its
+ * divider (as for the rational fraction representation), data
+ * power and the rational fraction divider leftover. Then all of
+ * this is collected in a total sum variable, whose value is
+ * normalized by the total divider before being returned.
+ */
+ do {
+ tmp = term->coef;
+ for (deg = 0; deg < term->deg; ++deg)
+ tmp = mult_frac(tmp, data, term->divider);
+ ret += tmp / term->divider_leftover;
+ } while ((term++)->deg);
+
+ return ret / total_divider;
+}
+EXPORT_SYMBOL_GPL(polynomial_calc);
+
+MODULE_DESCRIPTION("Generic polynomial calculations");
+MODULE_LICENSE("GPL");
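
As a rough usage sketch (not part of this patch), a driver could wrap the
poly_N_to_temp descriptor from the comment above like this; the names below
are purely illustrative:

/*
 * Hedged sketch: convert a raw PVT sample N into millidegrees Celsius
 * using the poly_N_to_temp example descriptor shown above.
 */
#include <linux/polynomial.h>

static const struct polynomial example_poly_N_to_temp = {
	.total_divider = 1,
	.terms = {
		{4, -16743, 1000, 1},
		{3, 81542, 1000, 1},
		{2, -182010, 1000, 1},
		{1, 310200, 1000, 1},
		{0, -48380, 1, 1}
	}
};

static long example_pvt_data_to_temp(long data)
{
	return polynomial_calc(&example_poly_N_to_temp, data);
}
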
diff --git a/lib/random32.c b/lib/random32.c
index 976632003ec6..d5d9029362cb 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -245,25 +245,13 @@ static struct prandom_test2 {
{ 407983964U, 921U, 728767059U },
};
-static u32 __extract_hwseed(void)
-{
- unsigned int val = 0;
-
- (void)(arch_get_random_seed_int(&val) ||
- arch_get_random_int(&val));
-
- return val;
-}
-
-static void prandom_seed_early(struct rnd_state *state, u32 seed,
- bool mix_with_hwseed)
+static void prandom_state_selftest_seed(struct rnd_state *state, u32 seed)
{
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+ state->s1 = __seed(LCG(seed), 2U);
+ state->s2 = __seed(LCG(state->s1), 8U);
+ state->s3 = __seed(LCG(state->s2), 16U);
+ state->s4 = __seed(LCG(state->s3), 128U);
}
static int __init prandom_state_selftest(void)
@@ -274,7 +262,7 @@ static int __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
- prandom_seed_early(&state, test1[i].seed, false);
+ prandom_state_selftest_seed(&state, test1[i].seed);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
@@ -289,7 +277,7 @@ static int __init prandom_state_selftest(void)
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
- prandom_seed_early(&state, test2[i].seed, false);
+ prandom_state_selftest_seed(&state, test2[i].seed);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
@@ -310,324 +298,3 @@ static int __init prandom_state_selftest(void)
}
core_initcall(prandom_state_selftest);
#endif
-
-/*
- * The prandom_u32() implementation is now completely separate from the
- * prandom_state() functions, which are retained (for now) for compatibility.
- *
- * Because of (ab)use in the networking code for choosing random TCP/UDP port
- * numbers, which open DoS possibilities if guessable, we want something
- * stronger than a standard PRNG. But the performance requirements of
- * the network code do not allow robust crypto for this application.
- *
- * So this is a homebrew Junior Spaceman implementation, based on the
- * lowest-latency trustworthy crypto primitive available, SipHash.
- * (The authors of SipHash have not been consulted about this abuse of
- * their work.)
- *
- * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
- * one word of output. This abbreviated version uses 2 rounds per word
- * of output.
- */
-
-struct siprand_state {
- unsigned long v0;
- unsigned long v1;
- unsigned long v2;
- unsigned long v3;
-};
-
-static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
-DEFINE_PER_CPU(unsigned long, net_rand_noise);
-EXPORT_PER_CPU_SYMBOL(net_rand_noise);
-
-/*
- * This is the core CPRNG function. As "pseudorandom", this is not used
- * for truly valuable things, just intended to be a PITA to guess.
- * For maximum speed, we do just two SipHash rounds per word. This is
- * the same rate as 4 rounds per 64 bits that SipHash normally uses,
- * so hopefully it's reasonably secure.
- *
- * There are two changes from the official SipHash finalization:
- * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
- * they are there only to make the output rounds distinct from the input
- * rounds, and this application has no input rounds.
- * - Rather than returning v0^v1^v2^v3, return v1+v3.
- * If you look at the SipHash round, the last operation on v3 is
- * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
- * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
- * it still cancels out half of the bits in v2 for no benefit.)
- * Second, since the last combining operation was xor, continue the
- * pattern of alternating xor/add for a tiny bit of extra non-linearity.
- */
-static inline u32 siprand_u32(struct siprand_state *s)
-{
- unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
- unsigned long n = raw_cpu_read(net_rand_noise);
-
- v3 ^= n;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= n;
- s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
- return v1 + v3;
-}
-
-
-/**
- * prandom_u32 - pseudo random number generator
- *
- * A 32 bit pseudo-random number is generated using a fast
- * algorithm suitable for simulation. This algorithm is NOT
- * considered safe for cryptographic use.
- */
-u32 prandom_u32(void)
-{
- struct siprand_state *state = get_cpu_ptr(&net_rand_state);
- u32 res = siprand_u32(state);
-
- put_cpu_ptr(&net_rand_state);
- return res;
-}
-EXPORT_SYMBOL(prandom_u32);
-
-/**
- * prandom_bytes - get the requested number of pseudo-random bytes
- * @buf: where to copy the pseudo-random bytes to
- * @bytes: the requested number of bytes
- */
-void prandom_bytes(void *buf, size_t bytes)
-{
- struct siprand_state *state = get_cpu_ptr(&net_rand_state);
- u8 *ptr = buf;
-
- while (bytes >= sizeof(u32)) {
- put_unaligned(siprand_u32(state), (u32 *)ptr);
- ptr += sizeof(u32);
- bytes -= sizeof(u32);
- }
-
- if (bytes > 0) {
- u32 rem = siprand_u32(state);
-
- do {
- *ptr++ = (u8)rem;
- rem >>= BITS_PER_BYTE;
- } while (--bytes > 0);
- }
- put_cpu_ptr(&net_rand_state);
-}
-EXPORT_SYMBOL(prandom_bytes);
-
-/**
- * prandom_seed - add entropy to pseudo random number generator
- * @entropy: entropy value
- *
- * Add some additional seed material to the prandom pool.
- * The "entropy" is actually our IP address (the only caller is
- * the network code), not for unpredictability, but to ensure that
- * different machines are initialized differently.
- */
-void prandom_seed(u32 entropy)
-{
- int i;
-
- add_device_randomness(&entropy, sizeof(entropy));
-
- for_each_possible_cpu(i) {
- struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
- unsigned long v0 = state->v0, v1 = state->v1;
- unsigned long v2 = state->v2, v3 = state->v3;
-
- do {
- v3 ^= entropy;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= entropy;
- } while (unlikely(!v0 || !v1 || !v2 || !v3));
-
- WRITE_ONCE(state->v0, v0);
- WRITE_ONCE(state->v1, v1);
- WRITE_ONCE(state->v2, v2);
- WRITE_ONCE(state->v3, v3);
- }
-}
-EXPORT_SYMBOL(prandom_seed);
-
-/*
- * Generate some initially weak seeding values to allow
- * the prandom_u32() engine to be started.
- */
-static int __init prandom_init_early(void)
-{
- int i;
- unsigned long v0, v1, v2, v3;
-
- if (!arch_get_random_long(&v0))
- v0 = jiffies;
- if (!arch_get_random_long(&v1))
- v1 = random_get_entropy();
- v2 = v0 ^ PRND_K0;
- v3 = v1 ^ PRND_K1;
-
- for_each_possible_cpu(i) {
- struct siprand_state *state;
-
- v3 ^= i;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= i;
-
- state = per_cpu_ptr(&net_rand_state, i);
- state->v0 = v0; state->v1 = v1;
- state->v2 = v2; state->v3 = v3;
- }
-
- return 0;
-}
-core_initcall(prandom_init_early);
-
-
-/* Stronger reseeding when available, and periodically thereafter. */
-static void prandom_reseed(struct timer_list *unused);
-
-static DEFINE_TIMER(seed_timer, prandom_reseed);
-
-static void prandom_reseed(struct timer_list *unused)
-{
- unsigned long expires;
- int i;
-
- /*
- * Reinitialize each CPU's PRNG with 128 bits of key.
- * No locking on the CPUs, but then somewhat random results are,
- * well, expected.
- */
- for_each_possible_cpu(i) {
- struct siprand_state *state;
- unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
- unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
-#if BITS_PER_LONG == 32
- int j;
-
- /*
- * On 32-bit machines, hash in two extra words to
- * approximate 128-bit key length. Not that the hash
- * has that much security, but this prevents a trivial
- * 64-bit brute force.
- */
- for (j = 0; j < 2; j++) {
- unsigned long m = get_random_long();
-
- v3 ^= m;
- PRND_SIPROUND(v0, v1, v2, v3);
- PRND_SIPROUND(v0, v1, v2, v3);
- v0 ^= m;
- }
-#endif
- /*
- * Probably impossible in practice, but there is a
- * theoretical risk that a race between this reseeding
- * and the target CPU writing its state back could
- * create the all-zero SipHash fixed point.
- *
- * To ensure that never happens, ensure the state
- * we write contains no zero words.
- */
- state = per_cpu_ptr(&net_rand_state, i);
- WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
- WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
- WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
- WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
- }
-
- /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
- expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
- mod_timer(&seed_timer, expires);
-}
-
-/*
- * The random ready callback can be called from almost any interrupt.
- * To avoid worrying about whether it's safe to delay that interrupt
- * long enough to seed all CPUs, just schedule an immediate timer event.
- */
-static int prandom_timer_start(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- mod_timer(&seed_timer, jiffies);
- return 0;
-}
-
-#ifdef CONFIG_RANDOM32_SELFTEST
-/* Principle: True 32-bit random numbers will all have 16 differing bits on
- * average. For each 32-bit number, there are 601M numbers differing by 16
- * bits, and 89% of the numbers differ by at least 12 bits. Note that more
- * than 16 differing bits also implies a correlation with inverted bits. Thus
- * we take 1024 random numbers and compare each of them to the other ones,
- * counting the deviation of correlated bits to 16. Constants report 32,
- * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the
- * u32 total, TEST_SIZE may be as large as 4096 samples.
- */
-#define TEST_SIZE 1024
-static int __init prandom32_state_selftest(void)
-{
- unsigned int x, y, bits, samples;
- u32 xor, flip;
- u32 total;
- u32 *data;
-
- data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL);
- if (!data)
- return 0;
-
- for (samples = 0; samples < TEST_SIZE; samples++)
- data[samples] = prandom_u32();
-
- flip = total = 0;
- for (x = 0; x < samples; x++) {
- for (y = 0; y < samples; y++) {
- if (x == y)
- continue;
- xor = data[x] ^ data[y];
- flip |= xor;
- bits = hweight32(xor);
- total += (bits - 16) * (bits - 16);
- }
- }
-
- /* We'll return the average deviation as 2*sqrt(corr/samples), which
- * is also sqrt(4*corr/samples) which provides a better resolution.
- */
- bits = int_sqrt(total / (samples * (samples - 1)) * 4);
- if (bits > 6)
- pr_warn("prandom32: self test failed (at least %u bits"
- " correlated, fixed_mask=%#x fixed_value=%#x\n",
- bits, ~flip, data[0] & ~flip);
- else
- pr_info("prandom32: self test passed (less than %u bits"
- " correlated)\n",
- bits+1);
- kfree(data);
- return 0;
-}
-core_initcall(prandom32_state_selftest);
-#endif /* CONFIG_RANDOM32_SELFTEST */
-
-/*
- * Start periodic full reseeding as soon as strong
- * random numbers are available.
- */
-static int __init prandom_init_late(void)
-{
- static struct notifier_block random_ready = {
- .notifier_call = prandom_timer_start
- };
- int ret = register_random_ready_notifier(&random_ready);
-
- if (ret == -EALREADY) {
- prandom_timer_start(&random_ready, 0, NULL);
- ret = 0;
- }
- return ret;
-}
-late_initcall(prandom_init_late);
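
With the per-CPU siprand engine removed above, prandom_u32() and friends
presumably become thin wrappers around the real RNG. A minimal sketch of
what such a wrapper could look like (an assumption about the header side,
which is not shown in this diff):

#include <linux/random.h>

/* Hedged sketch: delegate pseudo-random words to the seeded CRNG. */
static inline u32 prandom_u32(void)
{
	return get_random_u32();
}

static inline void prandom_bytes(void *buf, size_t nbytes)
{
	get_random_bytes(buf, nbytes);
}
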
diff --git a/lib/siphash.c b/lib/siphash.c
index 72b9068ab57b..71d315a6ad62 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -18,19 +18,13 @@
#include <asm/word-at-a-time.h>
#endif
-#define SIPROUND \
- do { \
- v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
- v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
- v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
- v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
- } while (0)
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
#define PREAMBLE(len) \
- u64 v0 = 0x736f6d6570736575ULL; \
- u64 v1 = 0x646f72616e646f6dULL; \
- u64 v2 = 0x6c7967656e657261ULL; \
- u64 v3 = 0x7465646279746573ULL; \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
u64 b = ((u64)(len)) << 56; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
@@ -389,19 +383,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
}
EXPORT_SYMBOL(hsiphash_4u32);
#else
-#define HSIPROUND \
- do { \
- v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
- v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
- v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
- v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
- } while (0)
+#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
#define HPREAMBLE(len) \
- u32 v0 = 0; \
- u32 v1 = 0; \
- u32 v2 = 0x6c796765U; \
- u32 v3 = 0x74656462U; \
+ u32 v0 = HSIPHASH_CONST_0; \
+ u32 v1 = HSIPHASH_CONST_1; \
+ u32 v2 = HSIPHASH_CONST_2; \
+ u32 v3 = HSIPHASH_CONST_3; \
u32 b = ((u32)(len)) << 24; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
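
The removed SIPROUND/HSIPROUND bodies are presumably replaced by shared
SIPHASH_PERMUTATION()/HSIPHASH_PERMUTATION() macros in the siphash header
(not shown here). Reconstructed from the removed rounds, they would look
roughly like:

/* Hedged reconstruction of the shared permutation macros; the actual
 * header definitions are outside this diff. */
#define SIPHASH_PERMUTATION(a, b, c, d) ( \
	(a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
	(c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
	(a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
	(c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))

#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
	(a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
	(c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
	(a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
	(c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
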
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 8662dc6cb509..7a0564d7cb7a 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -12,7 +12,7 @@ static int slab_errors;
static void test_clobber_zone(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
- SLAB_RED_ZONE, NULL);
+ SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
@@ -30,7 +30,7 @@ static void test_clobber_zone(struct kunit *test)
static void test_next_pointer(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
unsigned long tmp;
unsigned long *ptr_addr;
@@ -75,7 +75,7 @@ static void test_next_pointer(struct kunit *test)
static void test_first_word(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -90,7 +90,7 @@ static void test_first_word(struct kunit *test)
static void test_clobber_50th_byte(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -106,7 +106,7 @@ static void test_clobber_50th_byte(struct kunit *test)
static void test_clobber_redzone_free(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
- SLAB_RED_ZONE, NULL);
+ SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index bf5ba9af0500..5ca0d086ef4a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -66,6 +66,9 @@ struct stack_record {
unsigned long entries[]; /* Variable-sized array of entries. */
};
+static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_passed __initdata;
+
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
@@ -162,38 +165,58 @@ static int __init is_stack_depot_disabled(char *str)
}
early_param("stack_depot_disable", is_stack_depot_disabled);
-/*
- * __ref because of memblock_alloc(), which will not be actually called after
- * the __init code is gone, because at that point slab_is_available() is true
- */
-__ref int stack_depot_init(void)
+void __init stack_depot_want_early_init(void)
+{
+ /* Too late to request early init now */
+ WARN_ON(__stack_depot_early_init_passed);
+
+ __stack_depot_want_early_init = true;
+}
+
+int __init stack_depot_early_init(void)
+{
+ size_t size;
+
+ /* This is supposed to be called only once, from mm_init() */
+ if (WARN_ON(__stack_depot_early_init_passed))
+ return 0;
+
+ __stack_depot_early_init_passed = true;
+
+ if (!__stack_depot_want_early_init || stack_depot_disable)
+ return 0;
+
+ size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+ pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n",
+ size);
+ stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+
+ if (!stack_table) {
+ pr_err("Stack Depot hash table allocation failed, disabling\n");
+ stack_depot_disable = true;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
+ int ret = 0;
mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disable && !stack_table) {
- size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
- int i;
-
- if (slab_is_available()) {
- pr_info("Stack Depot allocating hash table with kvmalloc\n");
- stack_table = kvmalloc(size, GFP_KERNEL);
- } else {
- pr_info("Stack Depot allocating hash table with memblock_alloc\n");
- stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
- }
- if (stack_table) {
- for (i = 0; i < STACK_HASH_SIZE; i++)
- stack_table[i] = NULL;
- } else {
+ pr_info("Stack Depot allocating hash table with kvcalloc\n");
+ stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL);
+ if (!stack_table) {
pr_err("Stack Depot hash table allocation failed, disabling\n");
stack_depot_disable = true;
- mutex_unlock(&stack_depot_init_mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
}
}
mutex_unlock(&stack_depot_init_mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
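
The intended call pattern, sketched with hypothetical names: a caller that
needs the depot before slab is up requests early init, while everyone else
keeps calling stack_depot_init() lazily.

/* Hedged sketch: request the memblock-backed hash table before mm_init()
 * runs stack_depot_early_init(), e.g. from an early_param handler. */
static int __init example_debug_setup(char *str)
{
	stack_depot_want_early_init();
	return 0;
}
early_param("example_debug", example_debug_setup);

/* Later users may still initialize lazily; this is a no-op once the
 * hash table exists and returns -ENOMEM only on allocation failure. */
static void example_late_user(void)
{
	if (stack_depot_init())
		pr_warn("example: stack depot unavailable\n");
}
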
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 08fc72d3ed16..6432b8c3e431 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -25,7 +25,7 @@
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src,
+static __always_inline long do_strncpy_from_user(char *dst, const char __user *src,
unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index bffa0ebf9f8b..feeb935a2299 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -20,7 +20,7 @@
 * if it fits in an aligned 'long'. The caller needs to check
* the return value against "> max".
*/
-static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long align, res = 0;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 0c5cb2d6436a..2a7836e115b4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -53,6 +53,7 @@
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
#define FLAG_VERIFIER_ZEXT BIT(3)
+#define FLAG_LARGE_MEM BIT(4)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
@@ -7838,7 +7839,7 @@ static struct bpf_test tests[] = {
},
/* BPF_LDX_MEM B/H/W/DW */
{
- "BPF_LDX_MEM | BPF_B",
+ "BPF_LDX_MEM | BPF_B, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000008ULL),
@@ -7878,7 +7879,56 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_H",
+ "BPF_LDX_MEM | BPF_B, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_B, R1, R2, -256),
+ BPF_LDX_MEM(BPF_B, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 256),
+ BPF_LDX_MEM(BPF_B, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 4096),
+ BPF_LDX_MEM(BPF_B, R0, R1, 4096),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 4096 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000708ULL),
@@ -7918,7 +7968,72 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
},
{
- "BPF_LDX_MEM | BPF_W",
+ "BPF_LDX_MEM | BPF_H, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_H, R1, R2, -256),
+ BPF_LDX_MEM(BPF_H, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 256),
+ BPF_LDX_MEM(BPF_H, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 8192),
+ BPF_LDX_MEM(BPF_H, R0, R1, 8192),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 8192 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 13),
+ BPF_LDX_MEM(BPF_H, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000005060708ULL),
@@ -7957,6 +8072,162 @@ static struct bpf_test tests[] = {
{ { 0, 0 } },
.stack_depth = 8,
},
+ {
+ "BPF_LDX_MEM | BPF_W, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_W, R1, R2, -256),
+ BPF_LDX_MEM(BPF_W, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 256),
+ BPF_LDX_MEM(BPF_W, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 16384),
+ BPF_LDX_MEM(BPF_W, R0, R1, 16384),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 16384 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 13),
+ BPF_LDX_MEM(BPF_W, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_DW, R1, R2, -256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 32760),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32768, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 13),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -14094,6 +14365,9 @@ static void *generate_test_data(struct bpf_test *test, int sub)
if (test->aux & FLAG_NO_DATA)
return NULL;
+ if (test->aux & FLAG_LARGE_MEM)
+ return kmalloc(test->test[sub].data_size, GFP_KERNEL);
+
/* Test case expects an skb, so populate one. Various
* subtests generate skbs of different sizes based on
* the same data.
@@ -14137,7 +14411,10 @@ static void release_test_data(const struct bpf_test *test, void *data)
if (test->aux & FLAG_NO_DATA)
return;
- kfree_skb(data);
+ if (test->aux & FLAG_LARGE_MEM)
+ kfree(data);
+ else
+ kfree_skb(data);
}
static int filter_length(int which)
@@ -14674,6 +14951,36 @@ static struct tail_call_test tail_call_tests[] = {
.result = 10,
},
{
+ "Tail call load/store leaf",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP),
+ BPF_STX_MEM(BPF_DW, R3, R1, -8),
+ BPF_STX_MEM(BPF_DW, R3, R2, -16),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 3),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 32,
+ },
+ {
+ "Tail call load/store",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 3),
+ BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 16,
+ },
+ {
"Tail call error path, max count reached",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ad880231dfa8..c233b1a4e984 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -391,7 +391,7 @@ static void krealloc_uaf(struct kunit *test)
kfree(ptr1);
KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
- KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
+ KUNIT_ASSERT_NULL(test, ptr2);
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index a5a3d6c27e1f..9a564971f539 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -38,6 +38,7 @@
static int i_zero;
static int i_one_hundred = 100;
+static int match_int_ok = 1;
struct test_sysctl_data {
int int_0001;
@@ -96,6 +97,13 @@ static struct ctl_table test_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "match_int",
+ .data = &match_int_ok,
+ .maxlen = sizeof(match_int_ok),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "boot_int",
.data = &test_data.boot_int,
.maxlen = sizeof(test_data.boot_int),
@@ -132,6 +140,30 @@ static struct ctl_table_header *test_sysctl_header;
static int __init test_sysctl_init(void)
{
+ int i;
+
+ struct {
+ int defined;
+ int wanted;
+ } match_int[] = {
+ {.defined = *(int *)SYSCTL_ZERO, .wanted = 0},
+ {.defined = *(int *)SYSCTL_ONE, .wanted = 1},
+ {.defined = *(int *)SYSCTL_TWO, .wanted = 2},
+ {.defined = *(int *)SYSCTL_THREE, .wanted = 3},
+ {.defined = *(int *)SYSCTL_FOUR, .wanted = 4},
+ {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100},
+ {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200},
+ {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000},
+ {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000},
+ {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX},
+ {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535},
+ {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(match_int); i++)
+ if (match_int[i].defined != match_int[i].wanted)
+ match_int_ok = 0;
+
test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
if (!test_data.bitmap_0001)
return -ENOMEM;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 40d26a07a133..fb77f7bfd126 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -750,61 +750,38 @@ static int __init debug_boot_weak_hash_enable(char *str)
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
-static siphash_key_t ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
static void enable_ptr_key_workfn(struct work_struct *work)
{
- get_random_bytes(&ptr_key, sizeof(ptr_key));
- /* Needs to run from preemptible context */
- static_branch_disable(&not_filled_random_ptr_key);
+ static_branch_enable(&filled_random_ptr_key);
}
-static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
-
-static int fill_random_ptr_key(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- /* This may be in an interrupt handler. */
- queue_work(system_unbound_wq, &enable_ptr_key_work);
- return 0;
-}
-
-static struct notifier_block random_ready = {
- .notifier_call = fill_random_ptr_key
-};
-
-static int __init initialize_ptr_random(void)
-{
- int key_size = sizeof(ptr_key);
- int ret;
-
- /* Use hw RNG if available. */
- if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
- static_branch_disable(&not_filled_random_ptr_key);
- return 0;
- }
-
- ret = register_random_ready_notifier(&random_ready);
- if (!ret) {
- return 0;
- } else if (ret == -EALREADY) {
- /* This is in preemptible context */
- enable_ptr_key_workfn(&enable_ptr_key_work);
- return 0;
- }
-
- return ret;
-}
-early_initcall(initialize_ptr_random);
-
/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
+ static siphash_key_t ptr_key __read_mostly;
unsigned long hashval;
- if (static_branch_unlikely(&not_filled_random_ptr_key))
- return -EAGAIN;
+ if (!static_branch_likely(&filled_random_ptr_key)) {
+ static bool filled = false;
+ static DEFINE_SPINLOCK(filling);
+ static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+ unsigned long flags;
+
+ if (!system_unbound_wq ||
+ (!rng_is_initialized() && !rng_has_arch_random()) ||
+ !spin_trylock_irqsave(&filling, flags))
+ return -EAGAIN;
+
+ if (!filled) {
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
+ filled = true;
+ }
+ spin_unlock_irqrestore(&filling, flags);
+ }
+
#ifdef CONFIG_64BIT
hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);