Diffstat (limited to 'mm/vma.c')
 mm/vma.c | 95 ++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 62 insertions(+), 33 deletions(-)
diff --git a/mm/vma.c b/mm/vma.c
index 3a2098464b8f..f691c1db5b12 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -737,32 +737,30 @@ vmi_complete_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma,
}
/*
- * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * vmi_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
+ * for removal at a later date.  Splits the first and last VMAs if necessary
+ * and marks each gathered VMA as detached.
+ *
* @vmi: The vma iterator
* @vma: The starting vm_area_struct
* @mm: The mm_struct
* @start: The aligned start address to munmap.
* @end: The aligned end address to munmap.
* @uf: The userfaultfd list_head
- * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
- * success.
+ * @mas_detach: The maple state tracking the detached tree
+ * @locked_vm: A pointer to store the count of VM_LOCKED pages
*
- * Return: 0 on success and drops the lock if so directed, error and leaves the
- * lock held otherwise.
+ * Return: 0 on success, -EPERM on mseal()ed VMAs, -ENOMEM otherwise
*/
-int
-do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+static int
+vmi_gather_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
- unsigned long end, struct list_head *uf, bool unlock)
+ unsigned long end, struct list_head *uf,
+ struct ma_state *mas_detach, unsigned long *locked_vm)
{
struct vm_area_struct *next = NULL;
- struct maple_tree mt_detach;
int count = 0;
int error = -ENOMEM;
- unsigned long locked_vm = 0;
- MA_STATE(mas_detach, &mt_detach, 0, 0);
- mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
- mt_on_stack(mt_detach);
/*
* If we need to split any vma, do it now to save pain later.
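The setup removed here does not disappear; it moves into the rewritten
do_vmi_align_munmap() further down. For readers new to the idiom: an on-stack
maple tree borrows the lock flags of the live tree so lockdep checking stays
consistent, is populated through a maple state, and must be torn down
explicitly. A minimal lifecycle sketch, assuming kernel context (the store and
destroy calls are the same ones used elsewhere in this patch and in
abort_munmap_vmas()):

	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);

	/* Copy only the lock-related flags from the live tree. */
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);

	/* ... entries are added with mas_set()/mas_store_gfp() ... */

	__mt_destroy(&mt_detach);	/* free the tree's internal nodes */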
@@ -789,8 +787,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
goto start_split_failed;
}
- error = __split_vma(vmi, vma, start, 1);
- if (error)
+ if (__split_vma(vmi, vma, start, 1))
goto start_split_failed;
}
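Dropping the intermediate "error =" assignments in this and the following
hunks is safe because error is pre-initialized to -ENOMEM and, in practice,
__split_vma(), mas_store_gfp(), and userfaultfd_unmap_prep() fail here only
with -ENOMEM. A compressed illustration of the pattern, under that assumption:

	int error = -ENOMEM;	/* default failure code */

	if (__split_vma(vmi, vma, start, 1))
		goto start_split_failed;	/* error already holds -ENOMEM */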
@@ -807,20 +804,18 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
/* Does it split the end? */
if (next->vm_end > end) {
- error = __split_vma(vmi, next, end, 0);
- if (error)
+ if (__split_vma(vmi, next, end, 0))
goto end_split_failed;
}
vma_start_write(next);
- mas_set(&mas_detach, count);
- error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
- if (error)
+ mas_set(mas_detach, count++);
+ if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
goto munmap_gather_failed;
+
vma_mark_detached(next, true);
if (next->vm_flags & VM_LOCKED)
- locked_vm += vma_pages(next);
+ *locked_vm += vma_pages(next);
- count++;
if (unlikely(uf)) {
/*
* If userfaultfd_unmap_prep returns an error the vmas
@@ -831,9 +826,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
* split, despite we could. This is unlikely enough
* failure that it's not worth optimizing it for.
*/
- error = userfaultfd_unmap_prep(next, start, end, uf);
-
- if (error)
+ if (userfaultfd_unmap_prep(next, start, end, uf))
goto userfaultfd_error;
}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
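Note that each victim VMA is stored at a small sequential index (0, 1, 2, ...)
rather than at its address range, so a later pass can replay the set in gather
order. A minimal sketch of such a walk, assuming kernel context;
release_detached_vma() is a hypothetical stand-in for the real teardown done
by vmi_complete_munmap_vmas():

	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);			/* rewind to the first entry */
	mas_for_each(mas_detach, vma, ULONG_MAX)
		release_detached_vma(vma);	/* hypothetical helper */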
@@ -845,7 +838,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
/* Make sure no VMAs are about to be lost. */
{
- MA_STATE(test, &mt_detach, 0, 0);
+ MA_STATE(test, mas_detach->tree, 0, 0);
struct vm_area_struct *vma_mas, *vma_test;
int test_count = 0;
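The body of this debug block falls outside the hunk's context window and is
unchanged by the patch; it walks the live range and the detach tree in
lockstep and BUG()s on any disagreement. Roughly, following the surrounding
code:

	vma_iter_set(vmi, start);
	rcu_read_lock();
	vma_test = mas_find(&test, count - 1);
	for_each_vma_range(*vmi, vma_mas, end) {
		BUG_ON(vma_mas != vma_test);	/* same VMA in both trees? */
		test_count++;
		vma_test = mas_next(&test, count - 1);
	}
	rcu_read_unlock();
	BUG_ON(count != test_count);		/* same number of VMAs? */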
@@ -865,6 +858,48 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
while (vma_iter_addr(vmi) > start)
vma_iter_prev_range(vmi);
+ return 0;
+
+userfaultfd_error:
+munmap_gather_failed:
+end_split_failed:
+modify_vma_failed:
+ abort_munmap_vmas(mas_detach);
+start_split_failed:
+map_count_exceeded:
+ return error;
+}
+
+/*
+ * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * @vmi: The vma iterator
+ * @vma: The starting vm_area_struct
+ * @mm: The mm_struct
+ * @start: The aligned start address to munmap.
+ * @end: The aligned end address to munmap.
+ * @uf: The userfaultfd list_head
+ * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
+ * success.
+ *
+ * Return: 0 on success (the lock is dropped if so directed); otherwise an
+ * error code, with the lock still held.
+ */
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start, unsigned long end,
+ struct list_head *uf, bool unlock)
+{
+ struct maple_tree mt_detach;
+ MA_STATE(mas_detach, &mt_detach, 0, 0);
+ int error;
+ unsigned long locked_vm = 0;
+
+ mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ mt_on_stack(mt_detach);
+
+ error = vmi_gather_munmap_vmas(vmi, vma, mm, start, end, uf,
+ &mas_detach, &locked_vm);
+ if (error)
+ goto gather_failed;
+
error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
if (error)
goto clear_tree_failed;
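With the gather step extracted, the rewritten function reads as three phases
around a single undo point, completed in the hunk below. Sketched with the
names from this patch:

	/*
	 * Phase 1: vmi_gather_munmap_vmas()   - split edge VMAs, move the
	 *          victims into mas_detach, mark them detached
	 * Phase 2: vma_iter_clear_gfp()       - wipe [start, end) in the
	 *          live tree
	 * Phase 3: vmi_complete_munmap_vmas() - the "point of no return":
	 *          unmap pages, free page tables, drop the lock if @unlock
	 *
	 * A failure in phase 1 or 2 is unwound by abort_munmap_vmas(),
	 * which clears the detached mark on everything gathered so far.
	 */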
@@ -872,17 +907,11 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
/* Point of no return */
vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock, &mas_detach,
locked_vm);
-
return 0;
-modify_vma_failed:
clear_tree_failed:
-userfaultfd_error:
-munmap_gather_failed:
-end_split_failed:
abort_munmap_vmas(&mas_detach);
-start_split_failed:
-map_count_exceeded:
+gather_failed:
validate_mm(mm);
return error;
}
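For context, the usual entry point into this path validates and page-aligns
the request before handing off. Abridged from the kernel around this series
(not part of this diff; details may vary by version):

	int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
			  unsigned long start, size_t len, struct list_head *uf,
			  bool unlock)
	{
		unsigned long end;
		struct vm_area_struct *vma;

		if (offset_in_page(start) || start > TASK_SIZE ||
		    len > TASK_SIZE - start)
			return -EINVAL;

		end = start + PAGE_ALIGN(len);
		if (end == start)
			return -EINVAL;

		/* Find the first overlapping VMA */
		vma = vma_find(vmi, end);
		if (!vma) {
			if (unlock)
				mmap_write_unlock(mm);
			return 0;
		}

		return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
	}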