author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 14:41:04 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-01 14:41:04 -0700
commit		5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree		3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/mips/kernel
parent		5fab10041b4389b61de7e7a49893190bae686241 (diff)
parent		2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff)
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
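The zero-padding behaviour referred to above is the rule that a faulting copy_from_user() must clear the uncopied tail of the kernel buffer, so callers never see stale kernel memory. After this series the checking and padding live in generic code and each architecture supplies only a raw primitive. Below is a minimal sketch of that split, assuming the era's VERIFY_READ-style access_ok() and a raw_copy_from_user() that returns the number of bytes left uncopied; it is simplified, not the verbatim kernel source:

	static inline unsigned long
	sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);	/* arch-supplied */
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero the uncopied tail */
		return res;	/* number of bytes NOT copied */
	}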
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
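Many of the commits above are per-architecture switches to RAW_COPY_USER. Under that scheme an architecture no longer implements copy_{to,from}_user() itself: it selects the raw-copy option and provides only raw_copy_from_user()/raw_copy_to_user(), which do no access_ok() checking and no tail-zeroing and simply report how many bytes were left uncopied. An illustrative sketch of the arch-side half follows; the function name and return convention are the real contract, while __arch_copy_from_user_asm is a hypothetical stand-in for an arch's hand-written copy loop:

	static inline unsigned long
	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		/*
		 * Copy n bytes from user space.  Return the number of bytes
		 * that could NOT be copied (0 on full success).  No
		 * access_ok(), no zero-padding: the generic wrapper does both.
		 */
		return __arch_copy_from_user_asm(to, from, n);	/* hypothetical helper */
	}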
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--	arch/mips/kernel/mips-r2-to-r6-emul.c	24
-rw-r--r--	arch/mips/kernel/syscall.c	2
-rw-r--r--	arch/mips/kernel/unaligned.c	10
3 files changed, 18 insertions, 18 deletions
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index d8f1cf1ec370..550e7d03090a 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1200,7 +1200,7 @@ fpu_emul:
 	case lwl_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1273,7 +1273,7 @@ fpu_emul:
 	case lwr_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1347,7 +1347,7 @@ fpu_emul:
 	case swl_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1417,7 +1417,7 @@ fpu_emul:
 	case swr_op:
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1492,7 +1492,7 @@ fpu_emul:
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1611,7 +1611,7 @@ fpu_emul:
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1730,7 +1730,7 @@ fpu_emul:
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1848,7 +1848,7 @@ fpu_emul:
 
 		rt = regs->regs[MIPSInst_RT(inst)];
 		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
-		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGSEGV;
 			break;
@@ -1965,7 +1965,7 @@ fpu_emul:
 			err = SIGBUS;
 			break;
 		}
-		if (!access_ok(VERIFY_READ, vaddr, 4)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			break;
@@ -2021,7 +2021,7 @@ fpu_emul:
 			err = SIGBUS;
 			break;
 		}
-		if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			break;
@@ -2084,7 +2084,7 @@ fpu_emul:
 			err = SIGBUS;
 			break;
 		}
-		if (!access_ok(VERIFY_READ, vaddr, 8)) {
+		if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			break;
@@ -2145,7 +2145,7 @@ fpu_emul:
 			err = SIGBUS;
 			break;
 		}
-		if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+		if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
 			current->thread.cp0_baduaddr = vaddr;
 			err = SIGBUS;
 			break;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index f1d17ece4181..1dfa7f5796c7 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 	if (unlikely(addr & 3))
 		return -EINVAL;
 
-	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
+	if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4)))
 		return -EINVAL;
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 7ed98354fe9d..f806ee56e639 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadHW(addr, value, res);
 			else
 				LoadHWE(addr, value, res);
@@ -1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadW(addr, value, res);
 			else
 				LoadWE(addr, value, res);
@@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 			goto sigbus;
 
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				LoadHWU(addr, value, res);
 			else
 				LoadHWUE(addr, value, res);
@@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 		value = regs->regs[insn.i_format.rt];
 
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				StoreHW(addr, value, res);
 			else
 				StoreHWE(addr, value, res);
@@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 		value = regs->regs[insn.i_format.rt];
 
 		if (IS_ENABLED(CONFIG_EVA)) {
-			if (segment_eq(get_fs(), get_ds()))
+			if (uaccess_kernel())
 				StoreW(addr, value, res);
 			else
 				StoreWE(addr, value, res);
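Two idioms recur in the hunks above. The (void __user *) casts exist because vaddr is held as an unsigned long while the consolidated access_ok() is typed against a user pointer; the cast changes nothing at runtime, it only satisfies the type (and sparse's __user annotation). And uaccess_kernel() replaces the open-coded segment comparison; at the time it was essentially the same test under a clearer name. A sketch based on my reading of the generic header of that era, so treat the exact definition as an assumption:

	/* Roughly the generic definition of the day; get_ds() returned
	 * KERNEL_DS on these architectures, so this is the same check. */
	#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

	/* unaligned.c before:	if (segment_eq(get_fs(), get_ds()))	*/
	/* unaligned.c after:	if (uaccess_kernel())			*/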