author    Mark Rutland <mark.rutland@arm.com>    2023-06-05 08:01:20 +0100
committer Peter Zijlstra <peterz@infradead.org>  2023-06-05 09:57:22 +0200
commit 1d78814d41701c216e28fcf2656526146dec4a1a (patch)
tree   5849192c902cf353cded8c01776f123d66645d30 /include/linux/atomic/atomic-long.h
parent locking/atomic: scripts: simplify raw_atomic_long*() definitions (diff)
locking/atomic: scripts: simplify raw_atomic*() definitions
Currently each ordering variant has several potential definitions,
with a mixture of preprocessor and C definitions, including several
copies of its C prototype, e.g.

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Make this a bit simpler by defining the C prototype once, and writing
the various potential definitions as plain C code guarded by ifdeffery.
For example, the above becomes:

| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| #if defined(arch_atomic_fetch_andnot_acquire)
| 	return arch_atomic_fetch_andnot_acquire(i, v);
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| #elif defined(arch_atomic_fetch_andnot)
| 	return arch_atomic_fetch_andnot(i, v);
| #else
| 	return raw_atomic_fetch_and_acquire(~i, v);
| #endif
| }

This is far easier to read. As we now always have a single copy of the
C prototype wrapping all the potential definitions, there is an obvious
single location for kerneldoc comments.

At the same time, the fallbacks for raw_atomic*_xchg() are made to use
'new' rather than 'i' as the name of the new value. This is what the
existing fallback template used, and is more consistent with the
raw_atomic{_try,}cmpxchg() fallbacks.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-24-mark.rutland@arm.com
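[Editor's note: the same single-prototype-plus-ifdeffery shape can be
sketched in standalone C11 outside the kernel tree. This is a hedged
illustration only: HAVE_FETCH_ADD_ACQUIRE, HAVE_FETCH_ADD_RELAXED and
fake_fetch_add_acquire() are made-up names, not identifiers from this
patch, and <stdatomic.h> stands in for the kernel's arch atomics.]

| #include <stdatomic.h>
|
| /* Made-up feature macro, standing in for arch_* detection. */
| #define HAVE_FETCH_ADD_RELAXED 1
|
| /* One C prototype; the body selects an implementation via ifdeffery. */
| static inline int fake_fetch_add_acquire(atomic_int *v, int i)
| {
| #if defined(HAVE_FETCH_ADD_ACQUIRE)
| 	/* direct acquire-ordered op */
| 	return atomic_fetch_add_explicit(v, i, memory_order_acquire);
| #elif defined(HAVE_FETCH_ADD_RELAXED)
| 	/* relaxed op promoted with an acquire fence, as in the fallback above */
| 	int ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed);
| 	atomic_thread_fence(memory_order_acquire);
| 	return ret;
| #else
| 	/* fully-ordered fallback */
| 	return atomic_fetch_add_explicit(v, i, memory_order_seq_cst);
| #endif
| }

Whichever branch the preprocessor keeps, the compiler sees exactly one
definition behind the single prototype, which is also where a kerneldoc
comment would now attach.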
Diffstat (limited to 'include/linux/atomic/atomic-long.h')
-rw-r--r--  include/linux/atomic/atomic-long.h | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index 63e0b4078ebd..f564f71ff8af 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -622,42 +622,42 @@ raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
}
static __always_inline long
-raw_atomic_long_xchg(atomic_long_t *v, long i)
+raw_atomic_long_xchg(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
- return raw_atomic64_xchg(v, i);
+ return raw_atomic64_xchg(v, new);
#else
- return raw_atomic_xchg(v, i);
+ return raw_atomic_xchg(v, new);
#endif
}
static __always_inline long
-raw_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+raw_atomic_long_xchg_acquire(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
- return raw_atomic64_xchg_acquire(v, i);
+ return raw_atomic64_xchg_acquire(v, new);
#else
- return raw_atomic_xchg_acquire(v, i);
+ return raw_atomic_xchg_acquire(v, new);
#endif
}
static __always_inline long
-raw_atomic_long_xchg_release(atomic_long_t *v, long i)
+raw_atomic_long_xchg_release(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
- return raw_atomic64_xchg_release(v, i);
+ return raw_atomic64_xchg_release(v, new);
#else
- return raw_atomic_xchg_release(v, i);
+ return raw_atomic_xchg_release(v, new);
#endif
}
static __always_inline long
-raw_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
{
#ifdef CONFIG_64BIT
- return raw_atomic64_xchg_relaxed(v, i);
+ return raw_atomic64_xchg_relaxed(v, new);
#else
- return raw_atomic_xchg_relaxed(v, i);
+ return raw_atomic_xchg_relaxed(v, new);
#endif
}
@@ -872,4 +872,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// ad09f849db0db5b30c82e497eeb9056a394c5f22
+// e785d25cc3f220b7d473d36aac9da85dd7eb13a8
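[Editor's note on the rename in the hunks above: in C, a parameter name
is local to the function definition, so changing 'i' to 'new' cannot
affect any caller. A standalone C11 analogue of the xchg semantics,
with my_xchg() and main() as illustrative names rather than kernel
code:]

| #include <stdatomic.h>
| #include <stdio.h>
|
| /* Illustrative analogue of raw_atomic_long_xchg(): atomically store
|  * 'new' into *v, returning the previous value. */
| static long my_xchg(atomic_long *v, long new)
| {
| 	return atomic_exchange_explicit(v, new, memory_order_seq_cst);
| }
|
| int main(void)
| {
| 	atomic_long v = 1;
| 	long old = my_xchg(&v, 2);
| 	/* prints "old=1 now=2" */
| 	printf("old=%ld now=%ld\n", old, (long)atomic_load(&v));
| 	return 0;
| }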