mm/vma: use vmg->target to specify target VMA for new VMA merge
In commit 3a75ccba04
("mm: simplify vma merge structure and expand
comments") we introduced the vmg->target field to make the merging of
existing VMAs simpler, clarifying precisely which VMA would eventually
become the merged VMA once the merge operation was complete.
New VMA merging did not get quite the same treatment, retaining the rather
confusing convention of storing the target VMA in vmg->middle.
This patch corrects this state of affairs, utilising vmg->target for this
purpose in both vma_merge_new_range() and vma_expand().
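At call sites the change is a straightforward rename. A minimal sketch of
the new convention, abridged from the relocate_vma_down() hunk below (the
iterator and range fields of vmg are elided here):

	struct vma_merge_struct vmg = {
		/* .vmi, .start, .end, .pgoff etc. set up as before */
	};

	vmg.target = vma;	/* previously: vmg.middle = vma; */
	if (vma_expand(&vmg))
		return -ENOMEM;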
We retain the WARN_ON for vmg->middle being specified in
vma_merge_new_range(), as doing so would make no sense, and add an
additional debug assert that fires if vmg->target is already set on entry.
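Concretely, vma_merge_new_range() now checks both fields on entry (taken
from the first hunk below); vmg->target is an output of the merge, so it
must be unset when the function is invoked:

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(vmg->middle, vmg);	/* retained: middle is meaningless here */
	VM_WARN_ON_VMG(vmg->target, vmg);	/* new: target is set by the merge itself */

Conversely, vma_expand() now asserts that its caller has set the field,
via VM_WARN_ON_VMG(!target, vmg).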
This patch additionally updates VMA userland testing to account for this
change.
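In the userland tests the update is mechanical; for example, in
test_dup_anon_vma() (abridged from the corresponding hunk below):

	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
	vmg.target = vma_prev;	/* previously: vmg.middle = vma_prev; */
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);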
[lorenzo.stoakes@oracle.com: make comment consistent in vma_expand()]
Link: https://lkml.kernel.org/r/c54f45e3-a6ac-4749-93c0-cc9e3080ee37@lucifer.local
Link: https://lkml.kernel.org/r/20250613184807.108089-1-lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4535cb331c (parent 9e82db9c0c)
 mm/vma.c | 36 +++++++++++++++++++-----------------
mm/vma.c
@@ -1048,6 +1048,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
 
 	mmap_assert_write_locked(vmg->mm);
 	VM_WARN_ON_VMG(vmg->middle, vmg);
+	VM_WARN_ON_VMG(vmg->target, vmg);
 	/* vmi must point at or before the gap. */
 	VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
 
@@ -1063,13 +1064,13 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
 	/* If we can merge with the next VMA, adjust vmg accordingly. */
 	if (can_merge_right) {
 		vmg->end = next->vm_end;
-		vmg->middle = next;
+		vmg->target = next;
 	}
 
 	/* If we can merge with the previous VMA, adjust vmg accordingly. */
 	if (can_merge_left) {
 		vmg->start = prev->vm_start;
-		vmg->middle = prev;
+		vmg->target = prev;
 		vmg->pgoff = prev->vm_pgoff;
 
 		/*
@@ -1091,10 +1092,10 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
 	 * Now try to expand adjacent VMA(s). This takes care of removing the
 	 * following VMA if we have VMAs on both sides.
 	 */
-	if (vmg->middle && !vma_expand(vmg)) {
-		khugepaged_enter_vma(vmg->middle, vmg->flags);
+	if (vmg->target && !vma_expand(vmg)) {
+		khugepaged_enter_vma(vmg->target, vmg->flags);
 		vmg->state = VMA_MERGE_SUCCESS;
-		return vmg->middle;
+		return vmg->target;
 	}
 
 	return NULL;
@@ -1106,27 +1107,29 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
  * @vmg: Describes a VMA expansion operation.
  *
  * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
- * Will expand over vmg->next if it's different from vmg->middle and vmg->end ==
- * vmg->next->vm_end. Checking if the vmg->middle can expand and merge with
+ * Will expand over vmg->next if it's different from vmg->target and vmg->end ==
+ * vmg->next->vm_end. Checking if the vmg->target can expand and merge with
  * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
- * - The caller must hold a WRITE lock on vmg->middle->mm->mmap_lock.
- * - The caller must have set @vmg->middle and @vmg->next.
+ * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
+ * - The caller must have set @vmg->target and @vmg->next.
  */
 int vma_expand(struct vma_merge_struct *vmg)
 {
 	struct vm_area_struct *anon_dup = NULL;
 	bool remove_next = false;
-	struct vm_area_struct *middle = vmg->middle;
+	struct vm_area_struct *target = vmg->target;
 	struct vm_area_struct *next = vmg->next;
 
+	VM_WARN_ON_VMG(!target, vmg);
+
 	mmap_assert_write_locked(vmg->mm);
 
-	vma_start_write(middle);
-	if (next && (middle != next) && (vmg->end == next->vm_end)) {
+	vma_start_write(target);
+	if (next && (target != next) && (vmg->end == next->vm_end)) {
 		int ret;
 
 		remove_next = true;
@@ -1137,19 +1140,18 @@ int vma_expand(struct vma_merge_struct *vmg)
 		 * In this case we don't report OOM, so vmg->give_up_on_mm is
 		 * safe.
 		 */
-		ret = dup_anon_vma(middle, next, &anon_dup);
+		ret = dup_anon_vma(target, next, &anon_dup);
 		if (ret)
 			return ret;
 	}
 
 	/* Not merging but overwriting any part of next is not handled. */
 	VM_WARN_ON_VMG(next && !remove_next &&
-		       next != middle && vmg->end > next->vm_start, vmg);
+		       next != target && vmg->end > next->vm_start, vmg);
 	/* Only handles expanding */
-	VM_WARN_ON_VMG(middle->vm_start < vmg->start ||
-		       middle->vm_end > vmg->end, vmg);
+	VM_WARN_ON_VMG(target->vm_start < vmg->start ||
+		       target->vm_end > vmg->end, vmg);
 
-	vmg->target = middle;
 	if (remove_next)
 		vmg->__remove_next = true;
 
mm/vma_exec.c
@@ -54,7 +54,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	vmg.middle = vma;
+	vmg.target = vma;
 	if (vma_expand(&vmg))
 		return -ENOMEM;
 
tools/testing/vma/vma.c
@@ -400,7 +400,7 @@ static bool test_simple_expand(void)
 	VMA_ITERATOR(vmi, &mm, 0);
 	struct vma_merge_struct vmg = {
 		.vmi = &vmi,
-		.middle = vma,
+		.target = vma,
 		.start = 0,
 		.end = 0x3000,
 		.pgoff = 0,
@@ -1318,7 +1318,7 @@ static bool test_dup_anon_vma(void)
 	vma_next->anon_vma = &dummy_anon_vma;
 
 	vmg_set_range(&vmg, 0, 0x5000, 0, flags);
-	vmg.middle = vma_prev;
+	vmg.target = vma_prev;
 	vmg.next = vma_next;
 
 	ASSERT_EQ(expand_existing(&vmg), 0);
@@ -1501,7 +1501,7 @@ static bool test_vmi_prealloc_fail(void)
 	vma->anon_vma = &dummy_anon_vma;
 
 	vmg_set_range(&vmg, 0, 0x5000, 3, flags);
-	vmg.middle = vma_prev;
+	vmg.target = vma_prev;
 	vmg.next = vma;
 
 	fail_prealloc = true;