author    Linus Torvalds <torvalds@linux-foundation.org>    2024-05-13 10:05:39 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-05-13 10:05:39 -0700
commit    2e57d1d6062af11420bc329ca004ebe3f3f6f0ee (patch)
tree      45d7c77f76bd77a2c6f71ac9dea2648b385f5cdd /arch
parent    c0b9620bc3f0a0f914996cc6631522d41870a9e0 (diff)
parent    5800e77d88c0cd98bc10460df148631afa7b5e4d (diff)
Merge tag 'cmpxchg.2024.05.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull cmpxchg updates from Paul McKenney:
 "Provide one-byte and two-byte cmpxchg() support on sparc32, parisc,
  and csky

  This provides native one-byte and two-byte cmpxchg() support for
  sparc32 and parisc, courtesy of Al Viro. This support is provided by
  the same hashed-array-of-locks technique used for the other atomic
  operations provided for these two platforms.

  There is also emulated one-byte cmpxchg() support for csky using a
  new cmpxchg_emu_u8() function that uses a four-byte cmpxchg() to
  emulate the one-byte variant.

  Similar patches for emulation of one-byte cmpxchg() for arc, sh, and
  xtensa have not yet received maintainer acks, so they are slated for
  the v6.11 merge window"

* tag 'cmpxchg.2024.05.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  csky: Emulate one-byte cmpxchg
  lib: Add one-byte emulation function
  parisc: add u16 support to cmpxchg()
  parisc: add missing export of __cmpxchg_u8()
  parisc: unify implementations of __cmpxchg_u{8,32,64}
  parisc: __cmpxchg_u32(): lift conversion into the callers
  sparc32: add __cmpxchg_u{8,16}() and teach __cmpxchg() to handle those sizes
  sparc32: unify __cmpxchg_u{32,64}
  sparc32: make the first argument of __cmpxchg_u64() volatile u64 *
  sparc32: make __cmpxchg_u32() return u32
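A note on the technique before the diff: the one-byte emulation operates on the aligned four-byte word that contains the target byte. The following is a minimal user-space sketch of that idea, not the kernel's cmpxchg_emu_u8() itself; the function name, the C11 atomics, and the little-endian byte indexing are assumptions of the sketch.

    #include <stdatomic.h>
    #include <stdint.h>

    /*
     * One-byte compare-and-exchange built from a four-byte CAS: align
     * down to the containing 32-bit word, splice the new byte into a
     * copy, and retry until no neighbouring byte changes underneath.
     * Returns the previous byte value, as cmpxchg() does.
     */
    uint8_t cmpxchg_u8_via_u32(volatile uint8_t *p, uint8_t old, uint8_t new_)
    {
        /* Sketch-level simplification: reinterpret the word as atomic. */
        _Atomic uint32_t *word = (_Atomic uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
        unsigned int shift = ((uintptr_t)p & 3) * 8;    /* little-endian */
        uint32_t mask = (uint32_t)0xff << shift;

        for (;;) {
            uint32_t cur = atomic_load_explicit(word, memory_order_relaxed);
            uint8_t seen = (uint8_t)((cur & mask) >> shift);

            if (seen != old)
                return seen;    /* mismatch: report the byte we saw */

            uint32_t repl = (cur & ~mask) | ((uint32_t)new_ << shift);
            if (atomic_compare_exchange_strong_explicit(word, &cur, repl,
                                                        memory_order_seq_cst,
                                                        memory_order_relaxed))
                return old;     /* byte swapped; neighbours preserved */
            /* Another CPU changed the word, possibly an adjacent byte;
             * reload and retry. */
        }
    }

The whole-word CAS is what makes the retry loop safe against concurrent updates to the three neighbouring bytes: it fails whenever any of them moves.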
Diffstat (limited to 'arch')
-rw-r--r--  arch/Kconfig                         |  3
-rw-r--r--  arch/csky/Kconfig                    |  1
-rw-r--r--  arch/csky/include/asm/cmpxchg.h      | 10
-rw-r--r--  arch/parisc/include/asm/cmpxchg.h    | 22
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c    |  2
-rw-r--r--  arch/parisc/lib/bitops.c             | 52
-rw-r--r--  arch/sparc/include/asm/cmpxchg_32.h  | 20
-rw-r--r--  arch/sparc/lib/atomic32.c            | 45
8 files changed, 72 insertions, 83 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index a51addb1abb9..9f2f3060a6ab 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1617,4 +1617,7 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT
# strict alignment always, even with -falign-functions.
def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG

+config ARCH_NEED_CMPXCHG_1_EMU
+ bool
+
endmenu
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index d3ac36751ad1..5479707eb5d1 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -37,6 +37,7 @@ config CSKY
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_NEED_CMPXCHG_1_EMU
select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select COMMON_CLK
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 916043b845f1..db6dda47184e 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -6,6 +6,7 @@
#ifdef CONFIG_SMP
#include <linux/bug.h>
#include <asm/barrier.h>
+#include <linux/cmpxchg-emu.h>

#define __xchg_relaxed(new, ptr, size) \
({ \
@@ -61,6 +62,9 @@
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -91,6 +95,9 @@
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -122,6 +129,9 @@
__typeof__(old) __old = (old); \
__typeof__(*(ptr)) __ret; \
switch (size) { \
+ case 1: \
+ __ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+ break; \
case 4: \
asm volatile ( \
RELEASE_FENCE \
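The three cmpxchg macro variants patched above differ only in the fences around the LL/SC loop; the new one-byte case simply delegates to cmpxchg_emu_u8() in each. As a rough C11 analogue of the three orderings (the names and the use of C11 atomics are illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Each variant returns the previous value, like cmpxchg(); the only
     * difference is the memory ordering requested. */
    static inline uint32_t cas32_relaxed(_Atomic uint32_t *p, uint32_t old, uint32_t new_)
    {
        atomic_compare_exchange_strong_explicit(p, &old, new_,
                                                memory_order_relaxed,
                                                memory_order_relaxed);
        return old;     /* updated to the observed value on failure */
    }

    static inline uint32_t cas32_acquire(_Atomic uint32_t *p, uint32_t old, uint32_t new_)
    {
        atomic_compare_exchange_strong_explicit(p, &old, new_,
                                                memory_order_acquire,
                                                memory_order_acquire);
        return old;
    }

    static inline uint32_t cas32_full(_Atomic uint32_t *p, uint32_t old, uint32_t new_)
    {
        atomic_compare_exchange_strong_explicit(p, &old, new_,
                                                memory_order_seq_cst,
                                                memory_order_seq_cst);
        return old;
    }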
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index c1d776bb16b4..bf0a0f1189eb 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -56,26 +56,24 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
- unsigned int new_);
-extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_);
+extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
- switch (size) {
+ return
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
+ size == 8 ? __cmpxchg_u64(ptr, old, new_) :
#endif
- case 4: return __cmpxchg_u32((unsigned int *)ptr,
- (unsigned int)old, (unsigned int)new_);
- case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
+ size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+ size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+ size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+ (__cmpxchg_called_with_bad_pointer(), old);
}

#define arch_cmpxchg(ptr, o, n) \
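Note the (__cmpxchg_called_with_bad_pointer(), old) arm above: the function is declared but deliberately never defined, so a call on an unsupported size becomes a link-time error rather than silent breakage, relying on the dead-code elimination the file's own "don't worry...optimizer" comment alludes to. A user-space sketch of the idiom (all names here are illustrative):

    /* Declared, never defined: any surviving call fails at link time. */
    extern void bad_cmpxchg_size(void);

    extern unsigned long cas_u8(volatile void *p, unsigned long o, unsigned long n);
    extern unsigned long cas_u32(volatile void *p, unsigned long o, unsigned long n);

    static inline unsigned long
    cas_sized(volatile void *ptr, unsigned long old, unsigned long new_, int size)
    {
        /* size is sizeof(*ptr) at every call site, a compile-time
         * constant, so exactly one arm survives optimization and the
         * bad_cmpxchg_size() call vanishes for every valid size. */
        return size == 4 ? cas_u32(ptr, old, new_) :
               size == 1 ? cas_u8(ptr, old, new_) :
               (bad_cmpxchg_size(), old);
    }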
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6f0c92e8149d..c1587aa35beb 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -22,6 +22,8 @@ EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 36a314199074..9df810050642 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -56,38 +56,20 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)
}

-u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
- unsigned long flags;
- u64 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
-
-unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
-{
- unsigned long flags;
- unsigned int prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return (unsigned long)prev;
-}
-
-u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
-{
- unsigned long flags;
- u8 prev;
-
- _atomic_spin_lock_irqsave(ptr, flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- _atomic_spin_unlock_irqrestore(ptr, flags);
- return prev;
-}
+#define CMPXCHG(T) \
+ T notrace __cmpxchg_##T(volatile T *ptr, T old, T new) \
+ { \
+ unsigned long flags; \
+ T prev; \
+ \
+ _atomic_spin_lock_irqsave(ptr, flags); \
+ if ((prev = *ptr) == old) \
+ *ptr = new; \
+ _atomic_spin_unlock_irqrestore(ptr, flags); \
+ return prev; \
+ }
+
+CMPXCHG(u64)
+CMPXCHG(u32)
+CMPXCHG(u16)
+CMPXCHG(u8)
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index d0af82c240b7..8c1a3ca34eeb 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -38,21 +38,19 @@ static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ v
/* bug catcher for when unsupported size is used - won't link */
void __cmpxchg_called_with_bad_pointer(void);

-/* we only need to support cmpxchg of a u32 on sparc */
-unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+u8 __cmpxchg_u8(volatile u8 *m, u8 old, u8 new_);
+u16 __cmpxchg_u16(volatile u16 *m, u16 old, u16 new_);
+u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);

/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
- switch (size) {
- case 4:
- return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
- default:
- __cmpxchg_called_with_bad_pointer();
- break;
- }
- return old;
+ return
+ size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+ size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+ size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+ (__cmpxchg_called_with_bad_pointer(), old);
}

#define arch_cmpxchg(ptr, o, n) \
@@ -63,7 +61,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})

-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new);
#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)

#include <asm-generic/cmpxchg-local.h>
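The last hunk widens the first parameter of __cmpxchg_u64() to volatile u64 *. The point is caller hygiene: arch_cmpxchg64() is used on volatile objects, and passing a pointer-to-volatile to a non-volatile parameter discards the qualifier, which compilers warn about. A small illustration with hypothetical names and a non-atomic stand-in body, just so the example builds:

    #include <stdint.h>

    static uint64_t cas64_nonvolatile(uint64_t *p, uint64_t old, uint64_t new_)
    {
        uint64_t prev = *p;
        if (prev == old)
            *p = new_;
        return prev;
    }

    static uint64_t cas64_volatile(volatile uint64_t *p, uint64_t old, uint64_t new_)
    {
        uint64_t prev = *p;
        if (prev == old)
            *p = new_;
        return prev;
    }

    volatile uint64_t shared;

    void demo(void)
    {
        /* warning: passing 'volatile uint64_t *' discards 'volatile' */
        cas64_nonvolatile((uint64_t *)&shared, 0, 1);
        /* clean: the parameter's qualifier matches the object */
        cas64_volatile(&shared, 0, 1);
    }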
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index cf80d1ae352b..8ae880ebf07a 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -159,32 +159,27 @@ unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
}
EXPORT_SYMBOL(sp32___change_bit);

-unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
-{
- unsigned long flags;
- u32 prev;
-
- spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
- return (unsigned long)prev;
-}
+#define CMPXCHG(T) \
+ T __cmpxchg_##T(volatile T *ptr, T old, T new) \
+ { \
+ unsigned long flags; \
+ T prev; \
+ \
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags); \
+ if ((prev = *ptr) == old) \
+ *ptr = new; \
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);\
+ \
+ return prev; \
+ }
+
+CMPXCHG(u8)
+CMPXCHG(u16)
+CMPXCHG(u32)
+CMPXCHG(u64)
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
-
-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
-{
- unsigned long flags;
- u64 prev;
-
- spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
- if ((prev = *ptr) == old)
- *ptr = new;
- spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
- return prev;
-}
EXPORT_SYMBOL(__cmpxchg_u64);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
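The ATOMIC_HASH() calls above are the hashed-array-of-locks technique the merge description mentions: the target address is hashed into a small table of spinlocks, so unrelated variables usually contend on different locks, while nearby addresses, whose low bits the hash discards, share one lock. A user-space sketch with illustrative table size and shift, and pthread mutexes in place of IRQ-safe spinlocks:

    #include <pthread.h>
    #include <stdint.h>

    #define HASH_SIZE 4U    /* illustrative; real tables are similarly small */

    static pthread_mutex_t hash_locks[HASH_SIZE] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Discard the low address bits, then index into the lock table, so
     * nearby addresses (e.g. a byte and the word containing it) map to
     * the same lock while distant ones usually do not. */
    static pthread_mutex_t *lock_for(const volatile void *addr)
    {
        return &hash_locks[((uintptr_t)addr >> 8) & (HASH_SIZE - 1)];
    }

    uint32_t cmpxchg_u32_locked(volatile uint32_t *ptr, uint32_t old, uint32_t new_)
    {
        pthread_mutex_t *lock = lock_for(ptr);
        uint32_t prev;

        pthread_mutex_lock(lock);
        if ((prev = *ptr) == old)
            *ptr = new_;
        pthread_mutex_unlock(lock);
        return prev;
    }

Because a byte and its containing word hash to the same bucket, the one-, two-, four-, and eight-byte operations on overlapping memory remain mutually atomic under this scheme, which is what lets the new __cmpxchg_u8()/__cmpxchg_u16() coexist with the word-sized primitives.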