mm: abstract reading sysctl_max_map_count, and READ_ONCE()

Concurrent reads and writes of sysctl_max_map_count are possible, so we
should use READ_ONCE() and WRITE_ONCE() when accessing it.

The sysctl procfs logic already enforces WRITE_ONCE(), so abstract the
read side with get_sysctl_max_map_count().

While we're here, also move the variable's declaration to mm/internal.h and
add the getter there, since only mm code interacts with it; there's no need
for anybody else to have access.

Finally, update the VMA userland tests to reflect the change.

Link: https://lkml.kernel.org/r/0715259eb37cbdfde4f9e5db92a20ec7110a1ce5.1773249037.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Reviewed-by: Pedro Falcato <pfalcato@suse.de>
Cc: Jann Horn <jannh@google.com>
Cc: Jianzhou Zhao <luckd0g@163.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Lorenzo Stoakes (Oracle)
2026-03-11 17:24:37 +00:00
committed by Andrew Morton
parent 9b9b8d4aeb
commit 2d1e54aab6
9 changed files with 24 additions and 12 deletions

View File

@@ -207,8 +207,6 @@ static inline void __mm_zero_struct_page(struct page *page)
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
extern int sysctl_max_map_count;
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

View File

@@ -1863,4 +1863,10 @@ static inline int pmdp_test_and_clear_young_notify(struct vm_area_struct *vma,
#endif /* CONFIG_MMU_NOTIFIER */
extern int sysctl_max_map_count;
/*
 * Read the maximum number of VMAs permitted per mm.
 *
 * sysctl_max_map_count may be updated concurrently by the sysctl write
 * path (which stores it with WRITE_ONCE()), so pair that with READ_ONCE()
 * here to avoid load tearing on the racy read.
 */
static inline int get_sysctl_max_map_count(void)
{
return READ_ONCE(sysctl_max_map_count);
}
#endif /* __MM_INTERNAL_H */

View File

@@ -375,7 +375,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
return -EOVERFLOW;
/* Too many mappings? */
if (mm->map_count > sysctl_max_map_count)
if (mm->map_count > get_sysctl_max_map_count())
return -ENOMEM;
/*

View File

@@ -1045,7 +1045,7 @@ static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
* which may not merge, then (if MREMAP_DONTUNMAP is not set) unmap the
* source, which may split, causing a net increase of 2 mappings.
*/
if (current->mm->map_count + 2 > sysctl_max_map_count)
if (current->mm->map_count + 2 > get_sysctl_max_map_count())
return -ENOMEM;
if (vma->vm_ops && vma->vm_ops->may_split) {
@@ -1813,7 +1813,7 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
* net increased map count of 2. In move_vma() we check for headroom of
* 2 additional mappings, so check early to avoid bailing out then.
*/
if (current->mm->map_count + 4 > sysctl_max_map_count)
if (current->mm->map_count + 4 > get_sysctl_max_map_count())
return -ENOMEM;
return 0;

View File

@@ -1317,7 +1317,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
return -ENOMEM;
mm = vma->vm_mm;
if (mm->map_count >= sysctl_max_map_count)
if (mm->map_count >= get_sysctl_max_map_count())
return -ENOMEM;
region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);

View File

@@ -590,7 +590,7 @@ out_free_vma:
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
if (vma->vm_mm->map_count >= sysctl_max_map_count)
if (vma->vm_mm->map_count >= get_sysctl_max_map_count())
return -ENOMEM;
return __split_vma(vmi, vma, addr, new_below);
@@ -1394,7 +1394,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
* its limit temporarily, to help free resources as expected.
*/
if (vms->end < vms->vma->vm_end &&
vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
vms->vma->vm_mm->map_count >= get_sysctl_max_map_count()) {
error = -ENOMEM;
goto map_count_exceeded;
}
@@ -2868,7 +2868,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT))
return -ENOMEM;
if (mm->map_count > sysctl_max_map_count)
if (mm->map_count > get_sysctl_max_map_count())
return -ENOMEM;
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))

View File

@@ -21,9 +21,6 @@ extern unsigned long dac_mmap_min_addr;
#define VM_BUG_ON(_expr) (BUG_ON(_expr))
#define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr))
/* We hardcode this for now. */
#define sysctl_max_map_count 0x1000000UL
#define TASK_SIZE ((1ul << 47)-PAGE_SIZE)
/*

View File

@@ -419,6 +419,9 @@ struct vma_iterator {
#define EMPTY_VMA_FLAGS ((vma_flags_t){ })
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
/* What action should be taken after an .mmap_prepare call is complete? */
enum mmap_action_type {
MMAP_NOTHING, /* Mapping is complete, no further action. */
@@ -1342,3 +1345,9 @@ static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
swap(vma->vm_file, file);
fput(file);
}
extern int sysctl_max_map_count;
/*
 * Userland-test mirror of the kernel's getter: reads the VMA map-count
 * limit with READ_ONCE() so the tested code paths match the kernel's
 * tearing-safe access pattern.
 */
static inline int get_sysctl_max_map_count(void)
{
return READ_ONCE(sysctl_max_map_count);
}

View File

@@ -14,6 +14,8 @@
#include "tests/mmap.c"
#include "tests/vma.c"
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
/* Helper functions which utilise static kernel functions. */
struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)