about summary refs log tree commit diff
path: root/arch/arm/include
diff options
context:
space:
mode:
author Ard Biesheuvel <ardb@kernel.org> 2022-01-24 23:32:51 +0100
committer Ard Biesheuvel <ardb@kernel.org> 2022-01-31 16:06:35 +0100
commit 4d5a643e738c6b6ccc1a05f6938643c3f08df29b (patch)
tree b30eec64ce43003e10bb7409cec7a23ec4ca1313 /arch/arm/include
parent ARM: drop pointless SMP check on secondary startup path (diff)
download linux-4d5a643e738c6b6ccc1a05f6938643c3f08df29b.tar.gz
linux-4d5a643e738c6b6ccc1a05f6938643c3f08df29b.tar.bz2
linux-4d5a643e738c6b6ccc1a05f6938643c3f08df29b.zip
ARM: make get_current() and __my_cpu_offset() __always_inline
The get_current() and __my_cpu_offset() accessors evaluate to only a single instruction emitted inline, but due to the size of the asm string that is created for SMP+v6 configurations, the compiler assumes otherwise, and may emit the functions out of line instead. So use __always_inline to avoid this.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- arch/arm/include/asm/current.h | 2
-rw-r--r-- arch/arm/include/asm/percpu.h | 2
2 files changed, 2 insertions, 2 deletions
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
index 131a89bbec6b..1e1178bf176d 100644
--- a/arch/arm/include/asm/current.h
+++ b/arch/arm/include/asm/current.h
@@ -14,7 +14,7 @@ struct task_struct;
extern struct task_struct *__current;
-static inline __attribute_const__ struct task_struct *get_current(void)
+static __always_inline __attribute_const__ struct task_struct *get_current(void)
{
struct task_struct *cur;
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index a09034ae45a1..7545c87c251f 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -25,7 +25,7 @@ static inline void set_my_cpu_offset(unsigned long off)
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
-static inline unsigned long __my_cpu_offset(void)
+static __always_inline unsigned long __my_cpu_offset(void)
{
unsigned long off;