Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/tools/syscall.tbl                 |  1
-rw-r--r--  arch/arm64/include/asm/unistd.h            |  2
-rw-r--r--  arch/arm64/include/asm/unistd32.h          |  2
-rw-r--r--  arch/ia64/include/asm/spinlock.h           | 23
-rw-r--r--  arch/openrisc/include/asm/spinlock.h       |  3
-rw-r--r--  arch/parisc/include/asm/spinlock.h         | 15
-rw-r--r--  arch/powerpc/include/asm/simple_spinlock.h | 21
-rw-r--r--  arch/s390/include/asm/spinlock.h           |  8
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl     |  1
-rw-r--r--  arch/x86/entry/syscalls/syscall_64.tbl     |  1
10 files changed, 12 insertions(+), 65 deletions(-)
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index e842209e135d..543100151f2b 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -462,3 +462,4 @@
 446	common	landlock_restrict_self		sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448	common	process_mrelease		sys_process_mrelease
+449	common	futex_waitv			sys_futex_waitv
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 3cb206aea3db..6bdb5f5db438 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -38,7 +38,7 @@
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls		449
+#define __NR_compat_syscalls		450
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 844f6ae58662..41ea1195e44b 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -903,6 +903,8 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
 #define __NR_process_mrelease 448
 __SYSCALL(__NR_process_mrelease, sys_process_mrelease)
+#define __NR_futex_waitv 449
+__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 864775970c50..0e5c1ad3239c 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -124,18 +124,13 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
-						  unsigned long flags)
-{
-	arch_spin_lock(lock);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
 		"br.few 3f\n"
@@ -157,13 +152,8 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define arch_read_lock_flags arch_read_lock_flags
-#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
-
 #else /* !ASM_SUPPORTED */
 
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-
 #define arch_read_lock(rw)							\
 do {										\
 	arch_rwlock_t *__read_lock_ptr = (rw);					\
@@ -186,8 +176,10 @@ do {										\
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock(arch_rwlock_t *lock)
 {
+	unsigned long flags = 0;
+
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
 		"mov ar.ccv = r0\n"
@@ -210,9 +202,6 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 	: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define arch_write_lock_flags arch_write_lock_flags
-#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-
 #define arch_write_trylock(rw)							\
 ({										\
 	register long result;							\
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index a8940bdfcb7e..264944a71535 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -19,9 +19,6 @@
 
 #include <asm/qrwlock.h>
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fa5ee8a45dbd..a6e5d66a7656 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -23,21 +23,6 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 			continue;
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					 unsigned long flags)
-{
-	volatile unsigned int *a;
-
-	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
-		while (*a == 0)
-			if (flags & PSW_SM_I) {
-				local_irq_enable();
-				local_irq_disable();
-			}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 8985791a2ba5..7ae6aeef8464 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	}
 }
 
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long flags_dis;
-
-	while (1) {
-		if (likely(__arch_spin_trylock(lock) == 0))
-			break;
-		local_save_flags(flags_dis);
-		local_irq_restore(flags);
-		do {
-			HMT_low();
-			if (is_shared_processor())
-				splpar_spin_yield(lock);
-		} while (unlikely(lock->slock != 0));
-		HMT_medium();
-		local_irq_restore(flags_dis);
-	}
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("# arch_spin_unlock\n\t"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index ef59588a3042..888a2f1c9ee3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -67,14 +67,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lp)
 		arch_spin_lock_wait(lp);
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
-{
-	if (!arch_spin_trylock_once(lp))
-		arch_spin_lock_wait(lp);
-}
-#define arch_spin_lock_flags	arch_spin_lock_flags
-
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	if (!arch_spin_trylock_once(lp))
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 960a021d543e..7e25543693de 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -453,3 +453,4 @@
 446	i386	landlock_restrict_self	sys_landlock_restrict_self
 447	i386	memfd_secret		sys_memfd_secret
 448	i386	process_mrelease	sys_process_mrelease
+449	i386	futex_waitv		sys_futex_waitv
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 18b5500ea8bf..fe8f8dd157b4 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -370,6 +370,7 @@
 446	common	landlock_restrict_self	sys_landlock_restrict_self
 447	common	memfd_secret		sys_memfd_secret
 448	common	process_mrelease	sys_process_mrelease
+449	common	futex_waitv		sys_futex_waitv
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
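
For reference (not part of the commit): the syscall-table entries above wire up futex_waitv as syscall number 449. The following is a minimal userspace sketch of invoking it directly through syscall(2), assuming a kernel that carries this change (v5.16 or later). The struct layout and the FUTEX_32 flag value mirror the uapi header of that era; on a real system include <linux/futex.h> instead of redefining them, and treat the constants here as illustrative.

/* sketch: call futex_waitv with a deliberately mismatched expected value,
 * so the call returns immediately with EAGAIN instead of sleeping. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_futex_waitv
#define __NR_futex_waitv 449		/* number added by this commit (x86_64/i386/arm) */
#endif

#define FUTEX_32 2			/* wait on a 32-bit futex word (assumed uapi value) */

struct futex_waitv {			/* one entry per futex to wait on */
	uint64_t val;			/* expected value at uaddr */
	uint64_t uaddr;			/* user address of the futex word */
	uint32_t flags;			/* e.g. FUTEX_32 */
	uint32_t __reserved;		/* must be zero */
};

int main(void)
{
	uint32_t word = 1;		/* the futex word we would wait on */
	struct futex_waitv w = {
		.val   = 0,		/* expect 0, but word is 1: mismatch */
		.uaddr = (uintptr_t)&word,
		.flags = FUTEX_32,
	};

	/* args: waiters, nr_futexes, flags (must be 0), timeout, clockid */
	long ret = syscall(__NR_futex_waitv, &w, 1, 0, NULL, 0);
	printf("futex_waitv returned %ld (errno %d)\n", ret, errno);
	return 0;				/* expect -1 with errno == EAGAIN */
}

A real waiter would store the current value of the futex word in .val, check the return value for 0..nr_futexes-1 (index of the woken futex), and have another thread wake it with a futex_wake-style operation on the same address.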