author    Oleg Nesterov <oleg@redhat.com>    2024-09-29 16:42:58 +0200
committer Peter Zijlstra <peterz@infradead.org>    2024-10-07 09:28:45 +0200
commit    c16e2fdd746c78f5b2ce3c2ab8a26a61b6ed09e5
tree      a55a14d258a9f1f88c31a21aeb2628f0d29e86c1 /kernel/events
parent    c5356ab1db28cafc448a50c26ba84442237abb98
uprobes: deny mremap(xol_vma)
kernel/events/uprobes.c assumes that xol_area->vaddr is always correct, but a malicious application can remap its "[uprobes]" vma to another address to confuse the kernel. Introduce xol_mremap() to make this impossible.

With this change, utask->xol_vaddr in xol_free_insn_slot() can't be invalid, so the offset check can be turned into WARN_ON_ONCE(offset >= PAGE_SIZE).

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240929144258.GA9492@redhat.com
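The following userspace program is a minimal sketch (not part of the patch) illustrating the behaviour the new xol_mremap() callback enforces: once a "[uprobes]" special mapping exists in a process, moving it with mremap() is expected to fail with EPERM. Locating the vma by scanning /proc/self/maps and the choice of target address are assumptions made purely for illustration; the xol area itself is only created by the kernel after a uprobe fires in this process.

/*
 * Sketch: try to mremap the "[uprobes]" vma of the current process.
 * Assumes a uprobe has already been hit in this process, so the xol
 * area exists and shows up in /proc/self/maps.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long start = 0, end = 0;
	char line[512];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps)
		return 1;

	/* Find the address range of the "[uprobes]" special mapping. */
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[uprobes]")) {
			sscanf(line, "%lx-%lx", &start, &end);
			break;
		}
	}
	fclose(maps);

	if (!start) {
		puts("no [uprobes] vma in this process");
		return 0;
	}

	/* Try to move the xol area; with this patch the kernel refuses. */
	void *p = mremap((void *)start, end - start, end - start,
			 MREMAP_MAYMOVE | MREMAP_FIXED,
			 (void *)(start + 0x100000));
	if (p == MAP_FAILED && errno == EPERM)
		puts("mremap([uprobes]) rejected with EPERM, as expected");
	else
		puts("mremap([uprobes]) unexpectedly succeeded");

	return 0;
}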
Diffstat (limited to 'kernel/events')
-rw-r--r--    kernel/events/uprobes.c    30
1 file changed, 17 insertions, 13 deletions
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c9f1e1e56b15..d3538b6c0831 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1472,9 +1472,15 @@ static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
return 0;
}
+static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+{
+ return -EPERM;
+}
+
static const struct vm_special_mapping xol_mapping = {
.name = "[uprobes]",
.fault = xol_fault,
+ .mremap = xol_mremap,
};
/* Slot allocation for XOL */
@@ -1667,21 +1673,19 @@ static void xol_free_insn_slot(struct uprobe_task *utask)
{
struct xol_area *area = current->mm->uprobes_state.xol_area;
unsigned long offset = utask->xol_vaddr - area->vaddr;
+ unsigned int slot_nr;
utask->xol_vaddr = 0;
- /*
- * xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE).
- * This check can only fail if the "[uprobes]" vma was mremap'ed.
- */
- if (offset < PAGE_SIZE) {
- int slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
-
- clear_bit(slot_nr, area->bitmap);
- atomic_dec(&area->slot_count);
- smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
- if (waitqueue_active(&area->wq))
- wake_up(&area->wq);
- }
+ /* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */
+ if (WARN_ON_ONCE(offset >= PAGE_SIZE))
+ return;
+
+ slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
+ clear_bit(slot_nr, area->bitmap);
+ atomic_dec(&area->slot_count);
+ smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
+ if (waitqueue_active(&area->wq))
+ wake_up(&area->wq);
}
void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,