mirror of
https://github.com/torvalds/linux.git
synced 2026-04-22 16:53:59 -04:00
So far the userland VMA tests have been established as a rough expression of what's been possible. Adapt it into a more usable form by separating out tests and shared helper functions. Since we test functions that are declared statically in mm/vma.c, we make use of the trick of #include'ing kernel C files directly. In order for the tests to continue to function, we must therefore also #include mm/vma.c this way into the tests/ directory. We try to keep as much shared logic actually modularised into a separate compilation unit in shared.c, however the merge_existing() and attach_vma() helpers rely on statically declared mm/vma.c functions so these must be declared in main.c. Link: https://lkml.kernel.org/r/a0455ccfe4fdcd1c962c64f76304f612e5662a4e.1769097829.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Barry Song <baohua@kernel.org> Cc: David Hildenbrand <david@kernel.org> Cc: Dev Jain <dev.jain@arm.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Zi Yan <ziy@nvidia.com> Cc: Damien Le Moal <dlemoal@kernel.org> Cc: "Darrick J. Wong" <djwong@kernel.org> Cc: Jarkko Sakkinen <jarkko@kernel.org> Cc: Yury Norov <ynorov@nvidia.com> Cc: Chris Mason <clm@fb.com> Cc: Pedro Falcato <pfalcato@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1470 lines
39 KiB
C
1470 lines
39 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
|
|
/* Helper function which provides a wrapper around a merge new VMA operation. */
|
|
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
/*
|
|
* For convenience, get prev and next VMAs. Which the new VMA operation
|
|
* requires.
|
|
*/
|
|
vmg->next = vma_next(vmg->vmi);
|
|
vmg->prev = vma_prev(vmg->vmi);
|
|
vma_iter_next_range(vmg->vmi);
|
|
|
|
vma = vma_merge_new_range(vmg);
|
|
if (vma)
|
|
vma_assert_attached(vma);
|
|
|
|
return vma;
|
|
}
|
|
|
|
/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	/* Thin wrapper; returns vma_expand()'s result (0 on success). */
	return vma_expand(vmg);
}
|
|
|
|
/*
 * Helper function to reset merge state and the associated VMA iterator to a
 * specified new range.
 */
void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
		unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
{
	/* Reposition the iterator at the start of the new range. */
	vma_iter_set(vmg->vmi, start);

	/* Drop any VMA pointers cached from a previous merge attempt. */
	vmg->prev = NULL;
	vmg->middle = NULL;
	vmg->next = NULL;
	vmg->target = NULL;

	/* Describe the new range and the flags to merge with. */
	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;
	vmg->vm_flags = vm_flags;

	/* Clear internal merge-state flags set by prior operations. */
	vmg->just_expand = false;
	vmg->__remove_middle = false;
	vmg->__remove_next = false;
	vmg->__adjust_middle_start = false;
	vmg->__adjust_next_start = false;
}
|
|
|
|
/* Helper function to set both the VMG range and its anon_vma. */
|
|
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
|
|
unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
|
|
struct anon_vma *anon_vma)
|
|
{
|
|
vmg_set_range(vmg, start, end, pgoff, vm_flags);
|
|
vmg->anon_vma = anon_vma;
|
|
}
|
|
|
|
/*
|
|
* Helper function to try to merge a new VMA.
|
|
*
|
|
* Update vmg and the iterator for it and try to merge, otherwise allocate a new
|
|
* VMA, link it to the maple tree and return it.
|
|
*/
|
|
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
|
|
struct vma_merge_struct *vmg, unsigned long start,
|
|
unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
|
|
bool *was_merged)
|
|
{
|
|
struct vm_area_struct *merged;
|
|
|
|
vmg_set_range(vmg, start, end, pgoff, vm_flags);
|
|
|
|
merged = merge_new(vmg);
|
|
if (merged) {
|
|
*was_merged = true;
|
|
ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
|
|
return merged;
|
|
}
|
|
|
|
*was_merged = false;
|
|
|
|
ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
|
|
|
|
return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
|
|
}
|
|
|
|
/*
 * Test that a new VMA proposed in the gap between two compatible VMAs is
 * merged with both neighbours into a single spanning VMA.
 */
static bool test_simple_merge(void)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	/* Two VMAs at [0, 0x1000) and [0x2000, 0x3000), leaving a one-page gap. */
	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	/* Propose a new VMA filling the gap exactly, with a compatible pgoff. */
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
		.start = 0x1000,
		.end = 0x2000,
		.vm_flags = vm_flags,
		.pgoff = 1,
	};

	ASSERT_FALSE(attach_vma(&mm, vma_left));
	ASSERT_FALSE(attach_vma(&mm, vma_right));

	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);

	/* The merged VMA should span all three ranges. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_flags, vm_flags);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Test that modifying the flags of a sub-range of a VMA splits it into three
 * VMAs, then walk the resulting VMAs and verify their boundaries.
 */
static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	vm_flags_t flags = VM_READ | VM_MAYREAD;

	ASSERT_FALSE(attach_vma(&mm, init_vma));

	/*
	 * The flags will not be changed, the vma_modify_flags() function
	 * performs the merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, &flags);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	/* The modified VMA is now the middle third. */
	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
	 */

	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	/* First third: [0, 0x1000). */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	/* Middle third: [0x1000, 0x2000). */
	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	/* Final third: [0x2000, 0x3000). */
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Test that expanding an existing one-page VMA via vma_expand() grows it to
 * the requested range in place.
 */
static bool test_simple_expand(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0);
	/* Request expansion of [0, 0x1000) to [0, 0x3000). */
	struct vma_merge_struct vmg = {
		.vmi = &vmi,
		.target = vma,
		.start = 0,
		.end = 0x3000,
		.pgoff = 0,
	};

	ASSERT_FALSE(attach_vma(&mm, vma));

	ASSERT_FALSE(expand_existing(&vmg));

	/* The same VMA object should now cover the whole expanded range. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Test that vma_shrink() reduces an existing VMA to the requested smaller
 * range in place.
 */
static bool test_simple_shrink(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
	VMA_ITERATOR(vmi, &mm, 0);

	ASSERT_FALSE(attach_vma(&mm, vma));

	/* Shrink [0, 0x3000) down to [0, 0x1000). */
	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Test merging of a new VMA against surrounding VMAs in every position - no
 * merge, merge with previous, merge with next, merge with both - permuted
 * over VM_STICKY on the new range (is_sticky) and on each of the three
 * pre-existing VMAs (a/b/c_is_sticky).
 */
static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	int count;
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;

	if (is_sticky)
		vm_flags |= VM_STICKY;

	/*
	 * 0123456789abc
	 * AA B       CC
	 */
	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
	ASSERT_NE(vma_a, NULL);
	if (a_is_sticky)
		vm_flags_set(vma_a, VM_STICKY);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
	ASSERT_NE(vma_b, NULL);
	if (b_is_sticky)
		vm_flags_set(vma_b, VM_STICKY);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
	ASSERT_NE(vma_c, NULL);
	if (c_is_sticky)
		vm_flags_set(vma_c, VM_STICKY);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	/*
	 * NO merge.
	 *
	 * 0123456789abc
	 * AA B   **  CC
	 */
	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AA*B   DD  CC
	 */
	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	/* Stickiness is retained from any of the merged VMAs. */
	if (is_sticky || a_is_sticky || b_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge to PREVIOUS VMA.
	 *
	 * 0123456789abc
	 * AAAA*  DD  CC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAA *DD  CC
	 */
	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
	ASSERT_EQ(vma, vma_d);
	/* Prepend. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky) /* D uses is_sticky. */
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAA*DDD  CC
	 */
	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAAAAAA *CC
	 */
	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
	ASSERT_EQ(vma, vma_c);
	/* Prepend C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || c_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAAAAAA*CCC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);
	if (is_sticky || a_is_sticky || c_is_sticky)
		ASSERT_TRUE(IS_SET(vma->vm_flags, VM_STICKY));

	/*
	 * Final state.
	 *
	 * 0123456789abc
	 * AAAAAAAAAAAAA
	 */

	count = 0;
	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		detach_free_vma(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done. */
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}
|
|
|
|
static bool test_merge_new(void)
|
|
{
|
|
int i, j, k, l;
|
|
|
|
/* Generate every possible permutation of sticky flags. */
|
|
for (i = 0; i < 2; i++)
|
|
for (j = 0; j < 2; j++)
|
|
for (k = 0; k < 2; k++)
|
|
for (l = 0; l < 2; l++)
|
|
ASSERT_TRUE(__test_merge_new(i, j, k, l));
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
 * Test that VMAs carrying any VM_SPECIAL flag refuse to merge, both for new
 * VMA merges and for modification of existing VMAs.
 */
static bool test_vma_merge_special_flags(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vm_flags_t special_flags[] = { VM_IO, VM_DONTEXPAND, VM_PFNMAP, VM_MIXEDMAP };
	vm_flags_t all_special_flags = 0;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		all_special_flags |= special_flags[i];
	}
	ASSERT_EQ(all_special_flags, VM_SPECIAL);

	/*
	 * 01234
	 * AAA
	 */
	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAA*
	 *
	 * This should merge if not for the VM_SPECIAL flag.
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vm_flags_reset(vma_left, vm_flags | special_flag);
		vmg.vm_flags = vm_flags | special_flag;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAAB
	 *
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
	ASSERT_NE(vma, NULL);
	vmg.middle = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vm_flags_t special_flag = special_flags[i];

		vm_flags_reset(vma_left, vm_flags | special_flag);
		vmg.vm_flags = vm_flags | special_flag;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Test that merge operations never delete a VMA which has a vm_ops->close()
 * hook, covering both new VMA merges and modification of existing VMAs.
 */
static bool test_vma_merge_with_close(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the nearly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:     -       -    !NULL
	 *                 [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:     -     !NULL
	 *                 [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 *    -     !NULL  NULL
	 * [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 *
	 * 0123456789
	 * PPP**NNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 *    <>
	 * 0123456789
	 * PPPVV
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * also fail.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 *    <>
	 * 0123456789
	 *    VVNNNN
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures, however a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
|
|
|
|
/*
 * Test that a new VMA proposed between two VMAs which both have close hooks
 * still partially merges - expanding prev without deleting next.
 */
static bool test_vma_merge_new_with_close(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible), e.g.:
	 *
	 *        New VMA
	 *    A  v-------v  B
	 * |-----|       |-----|
	 *  close         close
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 *        New VMA
	 *    A          B
	 * |------------||-----|
	 *  close         close
	 */

	/* Have prev and next have a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev was expanded over the new range; next remains untouched. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Test merging of an existing (modified) VMA with its neighbours - merge
 * right (partial/full span), merge left (partial/full span), merge both, and
 * a set of ranges which must not merge - permuted over VM_STICKY on each of
 * the prev, middle and next VMAs.
 */
static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	vm_flags_t prev_flags = vm_flags;
	vm_flags_t next_flags = vm_flags;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct anon_vma_chain avc = {};

	if (prev_is_sticky)
		prev_flags |= VM_STICKY;
	if (middle_is_sticky)
		vm_flags |= VM_STICKY;
	if (next_is_sticky)
		next_flags |= VM_STICKY;

	/*
	 * Merge right case - partial span.
	 *
	 *    <->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   VNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vmg.prev = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* next absorbed the tail of vma; vma was shrunk to one page. */
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 *
	 *   <-->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   NNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(IS_SET(vma_next->vm_flags, VM_STICKY));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev absorbed the head of vma; vma was shrunk to one page. */
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(IS_SET(vma_prev->vm_flags, VM_STICKY));

	/* Clear down and reset. We should have deleted prev and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. The modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
	 *
	 *     -
	 *      -
	 *       -
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);

	vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}
|
|
|
|
static bool test_merge_existing(void)
|
|
{
|
|
int i, j, k;
|
|
|
|
/* Generate every possible permutation of sticky flags. */
|
|
for (i = 0; i < 2; i++)
|
|
for (j = 0; j < 2; j++)
|
|
for (k = 0; k < 2; k++)
|
|
ASSERT_TRUE(__test_merge_existing(i, j, k));
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
 * Assert that, when prev and next possess incompatible anon_vma objects, a
 * merge spanning both is downgraded to a merge of prev and the middle/new
 * range only. Covers both the modified-VMA and new-VMA merge paths.
 */
static bool test_anon_vma_non_mergeable(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
	/* A second, distinct anon_vma so prev and next cannot be compatible. */
	struct anon_vma dummy_anon_vma_2;

	/*
	 * In the case of modified VMA merge, merging both left and right VMAs
	 * but where prev and next have incompatible anon_vma objects, we revert
	 * to a merge of prev and VMA:
	 *
	 *    <-->
	 * 0123456789a
	 * PPPVVVVNNN
	 *  ->
	 * 0123456789a
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
	 */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	/* next gets a DIFFERENT anon_vma, making it incompatible with prev. */
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	/* The merge succeeds, but only left-merges into prev. */
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	/* next was untouched by the merge, so must not have been written. */
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, only adding a new
	 * VMA in a gap between prev and next.
	 *
	 *    <-->
	 * 0123456789a
	 * PPP****NNN
	 *  ->
	 * 0123456789a
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
	vmg.prev = vma_prev;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	vmg.anon_vma = NULL;
	/* Again, only a left-merge into prev is possible. */
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
|
|
|
|
/*
 * Assert that merge operations which delete or shrink a VMA owning an
 * anon_vma correctly duplicate ('clone') that anon_vma into the VMA which
 * survives/expands, across all relevant prev/middle/next layouts.
 */
static bool test_dup_anon_vma(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/* Clear was_cloned/was_unlinked state left over from earlier tests. */
	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA to delete the next one duplicates next's anon_vma
	 * and assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
	vmg.target = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	/* next's anon_vma must have been cloned into the surviving prev. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma (the middle VMA) has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
	vmg.anon_vma = &dummy_anon_vma;
	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend shrink/delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *     |<----->|
	 * *************-------|
	 *      vma       next
	 * shrink/delete extend
	 */
	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
	/* No VMA precedes vma here, so prev is set to vma itself. */
	vmg.prev = vma;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Assert that, when VMA iterator preallocation fails mid-merge, an anon_vma
 * that has already been duplicated into the target VMA is correctly unlinked
 * again, for both the modified-VMA merge and the expand paths.
 */
static bool test_vmi_prealloc_fail(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain avc = {};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);

	/* Arm the failure injection before attempting the merge. */
	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags);
	vmg.target = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
static bool test_merge_extend(void)
|
|
{
|
|
vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
|
|
struct mm_struct mm = {};
|
|
VMA_ITERATOR(vmi, &mm, 0x1000);
|
|
struct vm_area_struct *vma;
|
|
|
|
vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags);
|
|
alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
|
|
|
|
/*
|
|
* Extend a VMA into the gap between itself and the following VMA.
|
|
* This should result in a merge.
|
|
*
|
|
* <->
|
|
* * *
|
|
*
|
|
*/
|
|
|
|
ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
|
|
ASSERT_EQ(vma->vm_start, 0);
|
|
ASSERT_EQ(vma->vm_end, 0x4000);
|
|
ASSERT_EQ(vma->vm_pgoff, 0);
|
|
ASSERT_TRUE(vma_write_started(vma));
|
|
ASSERT_EQ(mm.map_count, 1);
|
|
|
|
cleanup_mm(&mm, &vmi);
|
|
return true;
|
|
}
|
|
|
|
/*
 * Assert that a new-range merge with vmg.just_expand set expands prev in
 * place without traversing to the VMA preceding it.
 */
static bool test_expand_only_mode(void)
{
	vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of the just_expand flag, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
	vmg.prev = vma_prev;
	vmg.just_expand = true;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	/* prev itself must have been expanded, not a new/other VMA returned. */
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	/* The iterator must not have moved off prev's position. */
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
	vma_assert_attached(vma);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Run every VMA merge test.
 *
 * NOTE(review): the TEST() macro is defined elsewhere in this file -
 * presumably it invokes test_<name>() and records the result via
 * *num_tests / *num_fail; confirm against the macro definition.
 */
static void run_merge_tests(int *num_tests, int *num_fail)
{
	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(expand_only_mode);
}
|