From 5dc62fdd8383afbd2faca6b6e6ea1052b45b0124 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 5 Aug 2015 16:41:37 +0100
Subject: MIPS: uaccess: Fix strlen_user with EVA

The strlen_user() function calls __strlen_kernel_asm in both branches
of the eva_kernel_access() conditional. For EVA it should be calling
__strlen_user_eva for user accesses, otherwise it will load from the
kernel address space instead of the user address space, and the access
checking will likely be ineffective at preventing it due to EVA's
overlapping user and kernel address spaces.

This was found after extending the test_user_copy module to cover user
string access functions, which gave the following error with EVA:

test_user_copy: illegal strlen_user passed

Fortunately the use of strlen_user() has been all but eradicated from
the mainline kernel, so only out of tree modules could be affected.

Fixes: e3a9b07a9caf ("MIPS: asm: uaccess: Add EVA support for str*_user operations")
Signed-off-by: James Hogan
Cc: Markos Chandras
Cc: Paul Burton
Cc: Leonid Yegoshin
Cc: linux-mips@linux-mips.org
Cc: # 3.15.x-
Patchwork: https://patchwork.linux-mips.org/patch/10842/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/uaccess.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 5305d694ffe5..3f959c01bfdb 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1384,7 +1384,7 @@ static inline long strlen_user(const char __user *s)
 		might_fault();
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
-			__MODULE_JAL(__strlen_kernel_asm)
+			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
--
cgit v1.2.3-70-g09d2
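
For context, the illegal-access check that the extended test_user_copy
module performs is conceptually as follows. This is a minimal sketch
rather than the module's actual source: kmem is assumed to be a kernel
buffer, and the address limit is assumed to be USER_DS. strlen_user()
must return 0 (a fault) for a kernel pointer here; with the bug
present, EVA kernels instead walked the string through the kernel
mapping and returned its length:

	#include <linux/printk.h>
	#include <linux/uaccess.h>

	/*
	 * Sketch of the check that produced the error message above.
	 * With a user (USER_DS) address limit, a kernel pointer must
	 * make strlen_user() fault and return 0; a non-zero result
	 * means the illegal access was allowed through.
	 */
	static int check_illegal_strlen(char *kmem)
	{
		long len = strlen_user((char __user *)kmem);

		if (len != 0) {
			pr_warn("test_user_copy: illegal strlen_user passed\n");
			return -EINVAL;
		}
		return 0;
	}
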
From 6f06a2c45d8d714ea3b11a360b4a7191e52acaa4 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 5 Aug 2015 16:41:38 +0100
Subject: MIPS: uaccess: Take EVA into account in __copy_from_user()

When EVA is in use, __copy_from_user() was unconditionally using the
EVA instructions to read the user address space; however, it can also
be used for kernel access. If the address isn't a valid user address
it will cause an address error or TLB exception, and if it is, user
memory may be read instead of kernel memory.

For example, in the following stack trace from Linux v3.10 (changes
since then prevent this particular case from still happening),
kernel_sendmsg() set the user address limit to KERNEL_DS, and
tcp_sendmsg() goes on to use __copy_from_user() with a kernel address
in KSeg0.

[<8002d434>] __copy_fromuser_common+0x10c/0x254
[<805710e0>] tcp_sendmsg+0x5f4/0xf00
[<804e8e3c>] sock_sendmsg+0x78/0xa0
[<804e8f28>] kernel_sendmsg+0x24/0x38
[<804ee0f8>] sock_no_sendpage+0x70/0x7c
[<8017c820>] pipe_to_sendpage+0x80/0x98
[<8017c6b0>] splice_from_pipe_feed+0xa8/0x198
[<8017cc54>] __splice_from_pipe+0x4c/0x8c
[<8017e844>] splice_from_pipe+0x58/0x78
[<8017e884>] generic_splice_sendpage+0x20/0x2c
[<8017d690>] do_splice_from+0xb4/0x110
[<8017d710>] direct_splice_actor+0x24/0x30
[<8017d394>] splice_direct_to_actor+0xd8/0x208
[<8017d51c>] do_splice_direct+0x58/0x7c
[<8014eaf4>] do_sendfile+0x1dc/0x39c
[<8014f82c>] SyS_sendfile+0x90/0xf8

Add the eva_kernel_access() check in __copy_from_user() like the one
in copy_from_user().

Signed-off-by: James Hogan
Cc: Markos Chandras
Cc: Paul Burton
Cc: Leonid Yegoshin
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10843/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/uaccess.h | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 3f959c01bfdb..5014e187df23 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1122,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	might_fault();							\
-	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
-					   __cu_len);			\
+	if (eva_kernel_access()) {					\
+		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
+						     __cu_from,		\
+						     __cu_len);		\
+	} else {							\
+		might_fault();						\
+		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
+						   __cu_len);		\
+	}								\
 	__cu_len;							\
 })
--
cgit v1.2.3-70-g09d2
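
The kernel_sendmsg() frame in the trace above is where the address
limit is widened. In Linux v3.10 it looked approximately like the
condensed version below; once set_fs(KERNEL_DS) is in effect, the
user-copy helpers further down the call chain, including the
__copy_from_user() reached from tcp_sendmsg(), are legitimately handed
kernel pointers, which is exactly the case eva_kernel_access() now
distinguishes:

	/* Condensed from net/socket.c, Linux v3.10 (approximate). */
	int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
			   struct kvec *vec, size_t num, size_t size)
	{
		mm_segment_t oldfs = get_fs();
		int result;

		set_fs(KERNEL_DS);
		/* The iovec entries point into kernel memory (KSeg0). */
		msg->msg_iov = (struct iovec *)vec;
		msg->msg_iovlen = num;
		result = sock_sendmsg(sock, msg, size);
		set_fs(oldfs);
		return result;
	}
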
From d6a428fb583738ad685c91a684748cdee7b2a05f Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 5 Aug 2015 16:41:39 +0100
Subject: MIPS: uaccess: Take EVA into account in [__]clear_user

__clear_user() (and clear_user(), which uses it) always accesses the
user-mode address space, which results in EVA store instructions when
EVA is enabled, even if the current user address limit is KERNEL_DS.

Fix this by adding a new symbol __bzero_kernel for the normal kernel
address space bzero in EVA mode, and call that from __clear_user() if
eva_kernel_access().

Signed-off-by: James Hogan
Cc: Markos Chandras
Cc: Paul Burton
Cc: Leonid Yegoshin
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10844/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/uaccess.h | 32 ++++++++++++++++++++++----------
 arch/mips/kernel/mips_ksyms.c   |  2 ++
 arch/mips/lib/memset.S          |  2 ++
 3 files changed, 26 insertions(+), 10 deletions(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 5014e187df23..2e3b3991cf0b 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1235,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
 {
 	__kernel_size_t res;

-	might_fault();
-	__asm__ __volatile__(
-		"move\t$4, %1\n\t"
-		"move\t$5, $0\n\t"
-		"move\t$6, %2\n\t"
-		__MODULE_JAL(__bzero)
-		"move\t%0, $6"
-		: "=r" (res)
-		: "r" (addr), "r" (size)
-		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	if (eva_kernel_access()) {
+		__asm__ __volatile__(
+			"move\t$4, %1\n\t"
+			"move\t$5, $0\n\t"
+			"move\t$6, %2\n\t"
+			__MODULE_JAL(__bzero_kernel)
+			"move\t%0, $6"
+			: "=r" (res)
+			: "r" (addr), "r" (size)
+			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	} else {
+		might_fault();
+		__asm__ __volatile__(
+			"move\t$4, %1\n\t"
+			"move\t$5, $0\n\t"
+			"move\t$6, %2\n\t"
+			__MODULE_JAL(__bzero)
+			"move\t%0, $6"
+			: "=r" (res)
+			: "r" (addr), "r" (size)
+			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
+	}

 	return res;
 }
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 291af0b5c482..e2b6ab74643d 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -17,6 +17,7 @@
 #include
 #include

+extern void *__bzero_kernel(void *__s, size_t __count);
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_kernel_nocheck_asm(char *__to,
					       const char *__from, long __len);
@@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
 EXPORT_SYMBOL(__copy_in_user_eva);
 EXPORT_SYMBOL(__copy_to_user_eva);
 EXPORT_SYMBOL(__copy_user_inatomic_eva);
+EXPORT_SYMBOL(__bzero_kernel);
 #endif
 EXPORT_SYMBOL(__bzero);
 EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index b8e63fd00375..8f0019a2e5c8 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -283,6 +283,8 @@ LEAF(memset)
 1:
 #ifndef CONFIG_EVA
 FEXPORT(__bzero)
+#else
+FEXPORT(__bzero_kernel)
 #endif
	__BUILD_BZERO LEGACY_MODE
--
cgit v1.2.3-70-g09d2

From 930c0f708e156613c37b97cf583c4c6b07d99cce Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Fri, 25 Dec 2015 12:09:30 -0500
Subject: MIPS: Fix bitrot in __get_user_unaligned()

Signed-off-by: Al Viro
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/uaccess.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 2e3b3991cf0b..095ecafe6bd3 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -599,7 +599,7 @@ extern void __put_user_unknown(void);
 * On error, the variable @x is set to zero.
 */
 #define __get_user_unaligned(x,ptr) \
-	__get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
+	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

 /*
 * Yuck. We need two variants, one for 64bit operation and one
@@ -620,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
 do {									\
 	switch (size) {							\
 	case 1: __get_data_asm(val, "lb", ptr); break;			\
-	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
-	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
+	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
+	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
 	default: __get_user_unaligned_unknown(); break;			\
 	}								\
--
cgit v1.2.3-70-g09d2
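
The three EVA fixes in this series all dispatch on eva_kernel_access().
For reference, its definition in this version of uaccess.h is
essentially the following: it is true when EVA is configured and the
current address limit covers kernel space, in which case the "user"
accessors must use ordinary kernel loads and stores rather than the EVA
user-mode load/store instructions:

	/*
	 * Essentially as defined in arch/mips/include/asm/uaccess.h at
	 * this point in the series. True when EVA is built in and the
	 * address limit is KERNEL_DS, i.e. the "user" accessors are
	 * really operating on kernel memory and must not use EVA
	 * user-mode load/store instructions.
	 */
	static inline bool eva_kernel_access(void)
	{
		if (!config_enabled(CONFIG_EVA))
			return false;

		return segment_eq(get_fs(), get_ds());
	}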