Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/firmware.c   |  27
-rw-r--r--  arch/parisc/kernel/pacache.S    |   2
-rw-r--r--  arch/parisc/kernel/real2.S      |   5
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 166
4 files changed, 90 insertions, 110 deletions
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 6817892a2c58..cc124d9f1f7f 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1232,15 +1232,18 @@ int __init pdc_soft_power_info(unsigned long *power_reg)
}
/*
- * pdc_soft_power_button - Control the soft power button behaviour
- * @sw_control: 0 for hardware control, 1 for software control
+ * pdc_soft_power_button{_panic} - Control the soft power button behaviour
+ * @sw_control: 0 for hardware control, 1 for software control
*
*
* This PDC function places the soft power button under software or
* hardware control.
- * Under software control the OS may control to when to allow to shut
- * down the system. Under hardware control pressing the power button
+ * Under software control the OS decides when the system is allowed to shut
+ * down. Under hardware control, pressing the power button
* powers off the system immediately.
+ *
+ * The _panic version relies on spin_trylock to prevent a deadlock
+ * on the panic path.
*/
int pdc_soft_power_button(int sw_control)
{
@@ -1254,6 +1257,22 @@ int pdc_soft_power_button(int sw_control)
return retval;
}
+int pdc_soft_power_button_panic(int sw_control)
+{
+ int retval;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&pdc_lock, flags)) {
+ pr_emerg("Couldn't enable soft power button\n");
+ return -EBUSY; /* ignored by the panic notifier */
+ }
+
+ retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
/*
* pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices.
* Primarily a problem on T600 (which parisc-linux doesn't support) but
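
For context, here is a minimal sketch of how a driver could call the new trylock-based helper from the panic path. None of this is taken from the patch: the callback, the notifier block and the registration call are illustrative, using the generic panic_notifier_list / struct notifier_block interface, and it assumes the pdc_soft_power_button_panic() prototype is visible via asm/pdc.h.

#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <asm/pdc.h>

/* Illustrative panic callback: hand the power button back to hardware
 * control (sw_control == 0) so a hung machine can still be switched off.
 * If the trylock inside pdc_soft_power_button_panic() fails, it returns
 * -EBUSY, which the notifier chain ignores.
 */
static int power_panic_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	pdc_soft_power_button_panic(0);
	return NOTIFY_DONE;
}

static struct notifier_block power_panic_block = {
	.notifier_call = power_panic_event,
};

/* registered once at driver init time: */
/* atomic_notifier_chain_register(&panic_notifier_list, &power_panic_block); */
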
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 9a0018f1f42c..541370d14559 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -889,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
+ depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
@@ -925,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
+ depi_safe 0, 31,PAGE_SHIFT, %r26 /* Clear any offset bits */
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
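
The two depi_safe additions only clear the in-page offset bits of the address passed in %r26, so the flush and purge loops always start on a page boundary even if the caller hands in an address with an offset. A rough C equivalent (the helper name is hypothetical, not part of the patch):

/* Same effect as "depi_safe 0, 31,PAGE_SHIFT, %r26": deposit PAGE_SHIFT
 * zero bits at the least-significant end of the address.
 */
static inline unsigned long page_align_down(unsigned long addr)
{
	return addr & PAGE_MASK;	/* drop any offset within the page */
}
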
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 4dc12c4c0980..509d18b8e0e6 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -235,9 +235,6 @@ ENTRY_CFI(real64_call_asm)
/* save fn */
copy %arg2, %r31
- /* set up the new ap */
- ldo 64(%arg1), %r29
-
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
@@ -249,7 +246,9 @@ ENTRY_CFI(real64_call_asm)
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
+ /* set up real-mode stack and real-mode ap */
tophys_r1 %sp
+ ldo -16(%sp), %r29 /* Reference param save area */
b,l rfi_virt2real,%r2
nop
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 09a34b07f02e..39acccabf2ed 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -25,31 +25,26 @@
#include <linux/random.h>
#include <linux/compat.h>
-/* we construct an artificial offset for the mapping based on the physical
- * address of the kernel mapping variable */
-#define GET_LAST_MMAP(filp) \
- (filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
-#define SET_LAST_MMAP(filp, val) \
- { /* nothing */ }
-
-static int get_offset(unsigned int last_mmap)
-{
- return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
-}
+/*
+ * Construct an artificial page offset for the mapping based on the physical
+ * address of the kernel file mapping variable.
+ */
+#define GET_FILP_PGOFF(filp) \
+ (filp ? (((unsigned long) filp->f_mapping) >> 8) \
+ & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
-static unsigned long shared_align_offset(unsigned int last_mmap,
+static unsigned long shared_align_offset(unsigned long filp_pgoff,
unsigned long pgoff)
{
- return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
+ return (filp_pgoff + pgoff) << PAGE_SHIFT;
}
static inline unsigned long COLOR_ALIGN(unsigned long addr,
- unsigned int last_mmap, unsigned long pgoff)
+ unsigned long filp_pgoff, unsigned long pgoff)
{
unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
unsigned long off = (SHM_COLOUR-1) &
- (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
-
+ shared_align_offset(filp_pgoff, pgoff);
return base + off;
}
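
The reworked colour handling above is plain arithmetic: GET_FILP_PGOFF() derives an artificial page offset from the address of the file's struct address_space, already masked to the SHM_COLOUR window; shared_align_offset() turns that plus the mapping's pgoff into a byte offset; and COLOR_ALIGN() rounds the address hint up to the next SHM_COLOUR boundary and adds the colour bits of that offset (the old version shifted by PAGE_SHIFT a second time, which the new code drops). A stand-alone sketch of the arithmetic, assuming SHM_COLOUR = 4 MB and 4 kB pages (the usual parisc values, not something this patch defines) and made-up inputs:

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHM_COLOUR	0x00400000UL	/* assumed 4 MB colour window */

static unsigned long shared_align_offset(unsigned long filp_pgoff,
					 unsigned long pgoff)
{
	return (filp_pgoff + pgoff) << PAGE_SHIFT;
}

static unsigned long color_align(unsigned long addr,
				 unsigned long filp_pgoff, unsigned long pgoff)
{
	unsigned long base = (addr + SHM_COLOUR - 1) & ~(SHM_COLOUR - 1);
	unsigned long off  = (SHM_COLOUR - 1) &
				shared_align_offset(filp_pgoff, pgoff);

	return base + off;
}

int main(void)
{
	/* hint 0x42345000, colour offset of three pages, pgoff 0 */
	printf("0x%lx\n", color_align(0x42345000UL, 3, 0));
	return 0;
}

This prints 0x42403000: the hint is rounded up to the next 4 MB boundary (0x42400000) and then biased by the three-page colour offset (0x3000), so mappings that share a colour offset land on the same cache colour.
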
@@ -98,126 +93,91 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
return PAGE_ALIGN(STACK_TOP - stack_base);
}
+enum mmap_allocation_direction {UP, DOWN};
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags, enum mmap_allocation_direction dir)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- unsigned long task_size = TASK_SIZE;
- int do_color_align, last_mmap;
+ unsigned long filp_pgoff;
+ int do_color_align;
struct vm_unmapped_area_info info;
- if (len > task_size)
+ if (unlikely(len > TASK_SIZE))
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
- last_mmap = GET_LAST_MMAP(filp);
+ filp_pgoff = GET_FILP_PGOFF(filp);
if (flags & MAP_FIXED) {
- if ((flags & MAP_SHARED) && last_mmap &&
- (addr - shared_align_offset(last_mmap, pgoff))
+ /* Even MAP_FIXED mappings must reside within TASK_SIZE */
+ if (TASK_SIZE - len < addr)
+ return -EINVAL;
+
+ if ((flags & MAP_SHARED) && filp &&
+ (addr - shared_align_offset(filp_pgoff, pgoff))
& (SHM_COLOUR - 1))
return -EINVAL;
- goto found_addr;
+ return addr;
}
if (addr) {
- if (do_color_align && last_mmap)
- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+ if (do_color_align)
+ addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
- if (task_size - len >= addr &&
+ if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
- goto found_addr;
+ return addr;
}
- info.flags = 0;
info.length = len;
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(filp_pgoff, pgoff);
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+ VM_BUG_ON(addr != -ENOMEM);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ }
+
+ info.flags = 0;
info.low_limit = mm->mmap_legacy_base;
info.high_limit = mmap_upper_limit(NULL);
- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
- info.align_offset = shared_align_offset(last_mmap, pgoff);
- addr = vm_unmapped_area(&info);
-
-found_addr:
- if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
- SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
-
- return addr;
+ return vm_unmapped_area(&info);
}
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
{
- struct vm_area_struct *vma, *prev;
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- int do_color_align, last_mmap;
- struct vm_unmapped_area_info info;
-
- /* requested length too big for entire address space */
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
- last_mmap = GET_LAST_MMAP(filp);
-
- if (flags & MAP_FIXED) {
- if ((flags & MAP_SHARED) && last_mmap &&
- (addr - shared_align_offset(last_mmap, pgoff))
- & (SHM_COLOUR - 1))
- return -EINVAL;
- goto found_addr;
- }
-
- /* requesting a specific address */
- if (addr) {
- if (do_color_align && last_mmap)
- addr = COLOR_ALIGN(addr, last_mmap, pgoff);
- else
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma_prev(mm, addr, &prev);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)) &&
- (!prev || addr >= vm_end_gap(prev)))
- goto found_addr;
- }
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = PAGE_SIZE;
- info.high_limit = mm->mmap_base;
- info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
- info.align_offset = shared_align_offset(last_mmap, pgoff);
- addr = vm_unmapped_area(&info);
- if (!(addr & ~PAGE_MASK))
- goto found_addr;
- VM_BUG_ON(addr != -ENOMEM);
-
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-
-found_addr:
- if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
- SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+ return arch_get_unmapped_area_common(filp,
+ addr, len, pgoff, flags, UP);
+}
- return addr;
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return arch_get_unmapped_area_common(filp,
+ addr, len, pgoff, flags, DOWN);
}
static int mmap_is_legacy(void)