author	Peter Zijlstra <peterz@infradead.org>	2021-10-30 09:47:58 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2021-11-11 13:09:31 +0100
commit	2105a92748e83e2e3ee6be539da959706bbb3898 (patch)
tree	04dd3c3a9a55be81189d30054b015b2587dc5968 /arch/x86/kernel/static_call.c
parent	Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/t... (diff)
static_call,x86: Robustify trampoline patching
Add a few signature bytes after the static call trampoline and verify
those bytes match before patching the trampoline. This avoids patching
random other JMPs (such as CFI jump-table entries) instead.

These bytes decode as:

   d:   53                      push   %rbx
   e:   43 54                   rex.XB push %r12

And happen to spell "SCT".

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20211030074758.GT174703@worktop.programming.kicks-ass.net
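For illustration only (not part of the patch): the x86 trampoline body is a
5-byte JMP32 (0xe9 + rel32), so the signature lands at offset 5, which is what
the new memcmp() in __static_call_validate() checks. A minimal user-space
sketch of that layout and check, with made-up names:

#include <stdio.h>
#include <string.h>

/* Hypothetical trampoline image: a 5-byte JMP32 followed by "SCT". */
static const unsigned char fake_tramp[] = {
	0xe9, 0x00, 0x00, 0x00, 0x00,	/* jmp rel32 (target irrelevant here) */
	0x53, 0x43, 0x54,		/* 'S' 'C' 'T' signature bytes */
};

/* Mirrors the patch's check: anything without "SCT" after the JMP is rejected. */
static int tramp_signature_ok(const void *insn)
{
	return memcmp((const unsigned char *)insn + 5, "SCT", 3) == 0;
}

int main(void)
{
	printf("signature %s\n", tramp_signature_ok(fake_tramp) ? "ok" : "fail");
	return 0;
}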
Diffstat (limited to 'arch/x86/kernel/static_call.c')
-rw-r--r--	arch/x86/kernel/static_call.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index ea028e736831..9c407a33a774 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -56,10 +56,15 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
 	text_poke_bp(insn, code, size, emulate);
 }
 
-static void __static_call_validate(void *insn, bool tail)
+static void __static_call_validate(void *insn, bool tail, bool tramp)
 {
 	u8 opcode = *(u8 *)insn;
 
+	if (tramp && memcmp(insn+5, "SCT", 3)) {
+		pr_err("trampoline signature fail");
+		BUG();
+	}
+
 	if (tail) {
 		if (opcode == JMP32_INSN_OPCODE ||
 		    opcode == RET_INSN_OPCODE)
@@ -74,7 +79,8 @@ static void __static_call_validate(void *insn, bool tail)
 	/*
 	 * If we ever trigger this, our text is corrupt, we'll probably not live long.
 	 */
-	WARN_ONCE(1, "unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
+	pr_err("unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
+	BUG();
 }
 
 static inline enum insn_type __sc_insn(bool null, bool tail)
@@ -97,12 +103,12 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 	mutex_lock(&text_mutex);
 
 	if (tramp) {
-		__static_call_validate(tramp, true);
+		__static_call_validate(tramp, true, true);
 		__static_call_transform(tramp, __sc_insn(!func, true), func);
 	}
 
 	if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
-		__static_call_validate(site, tail);
+		__static_call_validate(site, tail, false);
 		__static_call_transform(site, __sc_insn(!func, tail), func);