Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/gmap.c     2
-rw-r--r--  arch/s390/mm/maccess.c  2
-rw-r--r--  arch/s390/mm/vmem.c     4
3 files changed, 5 insertions, 3 deletions
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index f4b6fc746fce..989ebd0912b4 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
* The r2t parameter specifies the address of the source table. The
* four pages of the source table are made read-only in the parent gmap
* address space. A write to the source table area @r2t will automatically
- * remove the shadow r2 table and all of its decendents.
+ * remove the shadow r2 table and all of its descendants.
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory and
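Note (not part of the patch): the return convention documented above is shared by the gmap_shadow_*() helpers, and callers are expected to resolve the reported condition and retry. A rough, illustrative sketch of that pattern, assuming the upstream signature gmap_shadow_r2t(sg, saddr, r2t, fake) and a hypothetical handle_parent_fault() helper:

	/* Illustrative only: typical reaction to the return codes documented
	 * above; handle_parent_fault() is a made-up stand-in for the caller's
	 * fault-resolution path.
	 */
	rc = gmap_shadow_r2t(sg, saddr, r2t_origin, fake);
	switch (rc) {
	case 0:		/* shadowed, or already shadowed: keep walking */
		break;
	case -EAGAIN:	/* shadow table structure incomplete: resolve and retry */
		rc = handle_parent_fault(sg, saddr);
		break;
	default:	/* -ENOMEM and other errors: report to the caller */
		break;
	}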
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index d02a61620cfa..cbe1df1e9c18 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -13,9 +13,9 @@
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
+#include <linux/io.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
-#include <asm/io.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b9dcb4ae6c59..b26649233d12 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -481,6 +481,7 @@ static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
*/
static int vmem_add_range(unsigned long start, unsigned long size)
{
+ start = (unsigned long)__va(start);
return add_pagetable(start, start + size, true);
}
@@ -489,6 +490,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
*/
static void vmem_remove_range(unsigned long start, unsigned long size)
{
+ start = (unsigned long)__va(start);
remove_pagetable(start, start + size, true);
}
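Note (not part of the patch): the two hunks above make vmem_add_range() and vmem_remove_range() treat their start parameter as a physical address and convert it to the identity-mapped (direct-map) virtual address before the page-table walk. As a simplified model of that translation, using a made-up constant offset rather than the kernel's real definitions:

	/* Simplified illustration only: with a direct map at a fixed offset,
	 * __va()/__pa() reduce to adding/subtracting that offset, so the walk
	 * below vmem_add_range() always operates on virtual addresses.
	 */
	#define DIRECT_MAP_OFFSET	0x0UL	/* hypothetical; arch-defined in reality */
	#define __va(pa)	((void *)((unsigned long)(pa) + DIRECT_MAP_OFFSET))
	#define __pa(va)	((unsigned long)(va) - DIRECT_MAP_OFFSET)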
@@ -556,7 +558,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
* to any physical address. If missing, allocate segment- and region-
* table entries along. Meeting a large segment- or region-table entry
* while traversing is an error, since the function is expected to be
- * called against virtual regions reserverd for 4KB mappings only.
+ * called against virtual regions reserved for 4KB mappings only.
*/
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
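Note (not part of the patch): the comment above describes vmem_get_alloc_pte()'s walk-and-allocate behaviour. A minimal, hypothetical usage sketch, assuming the generic set_pte()/__pte()/PAGE_KERNEL helpers; map_one_4k_page() is illustrative and not taken from the kernel:

	/* Hypothetical example: map a single 4KB page at a virtual address
	 * reserved for 4KB mappings, allocating missing tables on demand.
	 */
	static int map_one_4k_page(unsigned long vaddr, unsigned long paddr)
	{
		pte_t *ptep;

		ptep = vmem_get_alloc_pte(vaddr, true);	/* alloc=true: build missing tables */
		if (!ptep)
			return -ENOMEM;
		set_pte(ptep, __pte(paddr | pgprot_val(PAGE_KERNEL)));
		return 0;
	}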