mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
mm: convert do_brk_flags() to use vma_flags_t
In order to be able to do this, we need to change VM_DATA_DEFAULT_FLAGS and friends and update the architecture-specific definitions also. We then have to update some KSM logic to handle VMA flags, and introduce VMA_STACK_FLAGS to define the vma_flags_t equivalent of VM_STACK_FLAGS. We also introduce two helper functions for use during the time we are converting legacy flags to vma_flags_t values - vma_flags_to_legacy() and legacy_to_vma_flags(). This enables us to break these changes up into separate, iterative parts. We use these explicitly here to keep VM_STACK_FLAGS around for certain users which need to maintain the legacy vm_flags_t values for the time being. We are no longer able to rely on the simple VM_xxx being set to zero if the feature is not enabled, so in the case of VM_DROPPABLE we introduce VMA_DROPPABLE as the vma_flags_t equivalent, which is set to EMPTY_VMA_FLAGS if the droppable flag is not available. While we're here, we make the description of do_brk_flags() into a kdoc comment, as it almost was already. We use vma_flags_to_legacy() to avoid needing to update the vm_get_page_prot() logic at this time. Note that in create_init_stack_vma() we have to replace the BUILD_BUG_ON() with a VM_WARN_ON_ONCE() as the tested values are no longer build time available. We also update mprotect_fixup() to use VMA flags where possible, though we have to live with a little duplication between vm_flags_t and vma_flags_t values for the time being until further conversions are made. While we're here, update VM_SPECIAL to be defined in terms of VMA_SPECIAL_FLAGS now we have vma_flags_to_legacy(). Finally, we update the VMA tests to reflect these changes.
Link: https://lkml.kernel.org/r/d02e3e45d9a33d7904b149f5604904089fd640ae.1774034900.git.ljs@kernel.org Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Acked-by: Paul Moore <paul@paul-moore.com> [SELinux] Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chengming Zhou <chengming.zhou@linux.dev> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christian Brauner <brauner@kernel.org> Cc: David Hildenbrand <david@kernel.org> Cc: Dinh Nguyen <dinguyen@kernel.org> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Jann Horn <jannh@google.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Kees Cook <kees@kernel.org> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Ondrej Mosnacek <omosnace@redhat.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Pedro Falcato <pfalcato@suse.de> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Stephen Smalley <stephen.smalley.work@gmail.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Will Deacon <will@kernel.org> Cc: xu xin <xu.xin16@zte.com.cn> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
bbbc17cb02
commit
3a6455d56b
@@ -131,7 +131,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
|
||||
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
|
||||
|
||||
/* Default Permissions for stack/heaps pages (Non Executable) */
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC
|
||||
|
||||
#define WANT_PAGE_VIRTUAL 1
|
||||
|
||||
|
||||
@@ -184,7 +184,7 @@ extern int pfn_valid(unsigned long);
|
||||
|
||||
#include <asm/memory.h>
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC
|
||||
|
||||
#include <asm-generic/getorder.h>
|
||||
#include <asm-generic/memory_model.h>
|
||||
|
||||
@@ -46,7 +46,12 @@ int pfn_is_map_memory(unsigned long pfn);
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
#define VMA_DATA_DEFAULT_FLAGS append_vma_flags(VMA_DATA_FLAGS_TSK_EXEC, \
|
||||
VMA_MTE_ALLOWED_BIT)
|
||||
#else
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC
|
||||
#endif
|
||||
|
||||
#include <asm-generic/getorder.h>
|
||||
|
||||
|
||||
@@ -90,7 +90,7 @@ struct page;
|
||||
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
|
||||
|
||||
/* Default vm area behavior is non-executable. */
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC
|
||||
|
||||
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
|
||||
|
||||
|
||||
@@ -104,7 +104,7 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
|
||||
extern int __virt_addr_valid(volatile void *kaddr);
|
||||
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC
|
||||
|
||||
#include <asm-generic/memory_model.h>
|
||||
#include <asm-generic/getorder.h>
|
||||
|
||||
@@ -213,7 +213,7 @@ extern bool __virt_addr_valid(const volatile void *kaddr);
|
||||
#define virt_addr_valid(kaddr) \
|
||||
__virt_addr_valid((const volatile void *) (kaddr))
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC
|
||||
|
||||
extern unsigned long __kaslr_offset;
|
||||
static inline unsigned long kaslr_offset(void)
|
||||
|
||||
@@ -85,7 +85,7 @@ extern struct page *mem_map;
|
||||
# define virt_to_page(vaddr) pfn_to_page(PFN_DOWN(virt_to_phys(vaddr)))
|
||||
# define virt_addr_valid(vaddr) pfn_valid(PFN_DOWN(virt_to_phys(vaddr)))
|
||||
|
||||
# define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
|
||||
# define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC
|
||||
|
||||
#include <asm-generic/memory_model.h>
|
||||
|
||||
|
||||
@@ -240,8 +240,8 @@ static inline const void *pfn_to_kaddr(unsigned long pfn)
|
||||
* and needs to be executable. This means the whole heap ends
|
||||
* up being executable.
|
||||
*/
|
||||
#define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC
|
||||
#define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS32 VMA_DATA_FLAGS_TSK_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS64 VMA_DATA_FLAGS_NON_EXEC
|
||||
|
||||
#ifdef __powerpc64__
|
||||
#include <asm/page_64.h>
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS32
|
||||
|
||||
#if defined(CONFIG_PPC_256K_PAGES) || \
|
||||
(defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
|
||||
|
||||
@@ -84,9 +84,9 @@ extern u64 ppc64_pft_size;
|
||||
|
||||
#endif /* __ASSEMBLER__ */
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS \
|
||||
#define VMA_DATA_DEFAULT_FLAGS \
|
||||
(is_32bit_task() ? \
|
||||
VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
|
||||
VMA_DATA_DEFAULT_FLAGS32 : VMA_DATA_DEFAULT_FLAGS64)
|
||||
|
||||
/*
|
||||
* This is the default if a program doesn't have a PT_GNU_STACK
|
||||
@@ -94,12 +94,12 @@ extern u64 ppc64_pft_size;
|
||||
* stack by default, so in the absence of a PT_GNU_STACK program header
|
||||
* we turn execute permission off.
|
||||
*/
|
||||
#define VM_STACK_DEFAULT_FLAGS32 VM_DATA_FLAGS_EXEC
|
||||
#define VM_STACK_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
|
||||
#define VMA_STACK_DEFAULT_FLAGS32 VMA_DATA_FLAGS_EXEC
|
||||
#define VMA_STACK_DEFAULT_FLAGS64 VMA_DATA_FLAGS_NON_EXEC
|
||||
|
||||
#define VM_STACK_DEFAULT_FLAGS \
|
||||
#define VMA_STACK_DEFAULT_FLAGS \
|
||||
(is_32bit_task() ? \
|
||||
VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
|
||||
VMA_STACK_DEFAULT_FLAGS32 : VMA_STACK_DEFAULT_FLAGS64)
|
||||
|
||||
#include <asm-generic/getorder.h>
|
||||
|
||||
|
||||
@@ -204,7 +204,7 @@ static __always_inline void *pfn_to_kaddr(unsigned long pfn)
|
||||
(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
|
||||
})
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC
|
||||
|
||||
#include <asm-generic/memory_model.h>
|
||||
#include <asm-generic/getorder.h>
|
||||
|
||||
@@ -277,7 +277,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
|
||||
|
||||
#define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug((unsigned long)(kaddr))))
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_NON_EXEC
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC
|
||||
|
||||
/* Physical address where kernel should be loaded. */
|
||||
#define LOAD_PHYSICAL_ADDR __ALIGN_KERNEL_MASK(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN - 1)
|
||||
|
||||
@@ -9,11 +9,11 @@
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC
|
||||
|
||||
#else
|
||||
|
||||
#define VM_STACK_DEFAULT_FLAGS (VM_GROWSDOWN | VM_DATA_FLAGS_EXEC)
|
||||
#define VMA_STACK_DEFAULT_FLAGS append_vma_flags(VMA_DATA_FLAGS_EXEC, VMA_GROWSDOWN_BIT)
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -17,8 +17,8 @@
|
||||
#ifdef CONFIG_KSM
|
||||
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
|
||||
unsigned long end, int advice, vm_flags_t *vm_flags);
|
||||
vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
|
||||
vm_flags_t vm_flags);
|
||||
vma_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
|
||||
vma_flags_t vma_flags);
|
||||
int ksm_enable_merge_any(struct mm_struct *mm);
|
||||
int ksm_disable_merge_any(struct mm_struct *mm);
|
||||
int ksm_disable(struct mm_struct *mm);
|
||||
@@ -103,10 +103,10 @@ bool ksm_process_mergeable(struct mm_struct *mm);
|
||||
|
||||
#else /* !CONFIG_KSM */
|
||||
|
||||
static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm,
|
||||
const struct file *file, vm_flags_t vm_flags)
|
||||
static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
|
||||
const struct file *file, vma_flags_t vma_flags)
|
||||
{
|
||||
return vm_flags;
|
||||
return vma_flags;
|
||||
}
|
||||
|
||||
static inline int ksm_disable(struct mm_struct *mm)
|
||||
|
||||
@@ -346,9 +346,9 @@ enum {
|
||||
* if KVM does not lock down the memory type.
|
||||
*/
|
||||
DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
|
||||
#ifdef CONFIG_PPC32
|
||||
#if defined(CONFIG_PPC32)
|
||||
DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
|
||||
#else
|
||||
#elif defined(CONFIG_64BIT)
|
||||
DECLARE_VMA_BIT(DROPPABLE, 40),
|
||||
#endif
|
||||
DECLARE_VMA_BIT(UFFD_MINOR, 41),
|
||||
@@ -503,31 +503,42 @@ enum {
|
||||
#endif
|
||||
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
|
||||
#define VM_DROPPABLE INIT_VM_FLAG(DROPPABLE)
|
||||
#define VMA_DROPPABLE mk_vma_flags(VMA_DROPPABLE_BIT)
|
||||
#else
|
||||
#define VM_DROPPABLE VM_NONE
|
||||
#define VMA_DROPPABLE EMPTY_VMA_FLAGS
|
||||
#endif
|
||||
|
||||
/* Bits set in the VMA until the stack is in its final location */
|
||||
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
|
||||
|
||||
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
|
||||
#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
|
||||
VMA_EXEC_BIT : VMA_READ_BIT)
|
||||
|
||||
/* Common data flag combinations */
|
||||
#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
|
||||
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
||||
#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
|
||||
VM_MAYWRITE | VM_MAYEXEC)
|
||||
#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
|
||||
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
||||
#define VMA_DATA_FLAGS_TSK_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
|
||||
TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
|
||||
VMA_MAYEXEC_BIT)
|
||||
#define VMA_DATA_FLAGS_NON_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
|
||||
VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
|
||||
#define VMA_DATA_FLAGS_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
|
||||
VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
|
||||
VMA_MAYEXEC_BIT)
|
||||
|
||||
#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
|
||||
#ifndef VMA_DATA_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_EXEC
|
||||
#endif
|
||||
|
||||
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
|
||||
#ifndef VMA_STACK_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
|
||||
#endif
|
||||
|
||||
#define VMA_STACK_FLAGS append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \
|
||||
VMA_STACK_BIT, VMA_ACCOUNT_BIT)
|
||||
|
||||
/* Temporary until VMA flags conversion complete. */
|
||||
#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
|
||||
|
||||
#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
|
||||
|
||||
#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
|
||||
@@ -536,8 +547,6 @@ enum {
|
||||
#define VM_SEALED_SYSMAP VM_NONE
|
||||
#endif
|
||||
|
||||
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
|
||||
|
||||
/* VMA basic access permission flags */
|
||||
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
|
||||
#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
|
||||
@@ -545,7 +554,10 @@ enum {
|
||||
/*
|
||||
* Special vmas that are non-mergable, non-mlock()able.
|
||||
*/
|
||||
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
|
||||
|
||||
#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
|
||||
VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
|
||||
#define VM_SPECIAL vma_flags_to_legacy(VMA_SPECIAL_FLAGS)
|
||||
|
||||
/*
|
||||
* Physically remapped pages are special. Tell the
|
||||
@@ -4045,7 +4057,6 @@ extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
|
||||
extern struct file *get_mm_exe_file(struct mm_struct *mm);
|
||||
extern struct file *get_task_exe_file(struct task_struct *task);
|
||||
|
||||
extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
|
||||
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
|
||||
|
||||
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
|
||||
|
||||
@@ -1916,4 +1916,7 @@ static inline int get_sysctl_max_map_count(void)
|
||||
return READ_ONCE(sysctl_max_map_count);
|
||||
}
|
||||
|
||||
bool may_expand_vm(struct mm_struct *mm, const vma_flags_t *vma_flags,
|
||||
unsigned long npages);
|
||||
|
||||
#endif /* __MM_INTERNAL_H */
|
||||
|
||||
43
mm/ksm.c
43
mm/ksm.c
@@ -735,21 +735,24 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr,
|
||||
return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||
static bool ksm_compatible(const struct file *file, vm_flags_t vm_flags)
|
||||
static bool ksm_compatible(const struct file *file, vma_flags_t vma_flags)
|
||||
{
|
||||
if (vm_flags & (VM_SHARED | VM_MAYSHARE | VM_SPECIAL |
|
||||
VM_HUGETLB | VM_DROPPABLE))
|
||||
return false; /* just ignore the advice */
|
||||
|
||||
/* Just ignore the advice. */
|
||||
if (vma_flags_test_any(&vma_flags, VMA_SHARED_BIT, VMA_MAYSHARE_BIT,
|
||||
VMA_HUGETLB_BIT))
|
||||
return false;
|
||||
if (vma_flags_test_single_mask(&vma_flags, VMA_DROPPABLE))
|
||||
return false;
|
||||
if (vma_flags_test_any_mask(&vma_flags, VMA_SPECIAL_FLAGS))
|
||||
return false;
|
||||
if (file_is_dax(file))
|
||||
return false;
|
||||
|
||||
#ifdef VM_SAO
|
||||
if (vm_flags & VM_SAO)
|
||||
if (vma_flags_test(&vma_flags, VMA_SAO_BIT))
|
||||
return false;
|
||||
#endif
|
||||
#ifdef VM_SPARC_ADI
|
||||
if (vm_flags & VM_SPARC_ADI)
|
||||
if (vma_flags_test(&vma_flags, VMA_SPARC_ADI_BIT))
|
||||
return false;
|
||||
#endif
|
||||
|
||||
@@ -758,7 +761,7 @@ static bool ksm_compatible(const struct file *file, vm_flags_t vm_flags)
|
||||
|
||||
static bool vma_ksm_compatible(struct vm_area_struct *vma)
|
||||
{
|
||||
return ksm_compatible(vma->vm_file, vma->vm_flags);
|
||||
return ksm_compatible(vma->vm_file, vma->flags);
|
||||
}
|
||||
|
||||
static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
|
||||
@@ -2825,17 +2828,17 @@ static int ksm_scan_thread(void *nothing)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool __ksm_should_add_vma(const struct file *file, vm_flags_t vm_flags)
|
||||
static bool __ksm_should_add_vma(const struct file *file, vma_flags_t vma_flags)
|
||||
{
|
||||
if (vm_flags & VM_MERGEABLE)
|
||||
if (vma_flags_test(&vma_flags, VMA_MERGEABLE_BIT))
|
||||
return false;
|
||||
|
||||
return ksm_compatible(file, vm_flags);
|
||||
return ksm_compatible(file, vma_flags);
|
||||
}
|
||||
|
||||
static void __ksm_add_vma(struct vm_area_struct *vma)
|
||||
{
|
||||
if (__ksm_should_add_vma(vma->vm_file, vma->vm_flags))
|
||||
if (__ksm_should_add_vma(vma->vm_file, vma->flags))
|
||||
vm_flags_set(vma, VM_MERGEABLE);
|
||||
}
|
||||
|
||||
@@ -2860,16 +2863,16 @@ static int __ksm_del_vma(struct vm_area_struct *vma)
|
||||
*
|
||||
* @mm: Proposed VMA's mm_struct
|
||||
* @file: Proposed VMA's file-backed mapping, if any.
|
||||
* @vm_flags: Proposed VMA's flags.
|
||||
* @vma_flags: Proposed VMA's flags.
|
||||
*
|
||||
* Returns: @vm_flags possibly updated to mark mergeable.
|
||||
* Returns: @vma_flags possibly updated to mark mergeable.
|
||||
*/
|
||||
vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
|
||||
vm_flags_t vm_flags)
|
||||
vma_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
|
||||
vma_flags_t vma_flags)
|
||||
{
|
||||
if (mm_flags_test(MMF_VM_MERGE_ANY, mm) &&
|
||||
__ksm_should_add_vma(file, vm_flags)) {
|
||||
vm_flags |= VM_MERGEABLE;
|
||||
__ksm_should_add_vma(file, vma_flags)) {
|
||||
vma_flags_set(&vma_flags, VMA_MERGEABLE_BIT);
|
||||
/*
|
||||
* Generally, the flags here always include MMF_VM_MERGEABLE.
|
||||
* However, in rare cases, this flag may be cleared by ksmd who
|
||||
@@ -2879,7 +2882,7 @@ vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
|
||||
__ksm_enter(mm);
|
||||
}
|
||||
|
||||
return vm_flags;
|
||||
return vma_flags;
|
||||
}
|
||||
|
||||
static void ksm_add_vmas(struct mm_struct *mm)
|
||||
|
||||
13
mm/mmap.c
13
mm/mmap.c
@@ -192,7 +192,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
|
||||
|
||||
brkvma = vma_prev_limit(&vmi, mm->start_brk);
|
||||
/* Ok, looks good - let it rip. */
|
||||
if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
|
||||
if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk,
|
||||
EMPTY_VMA_FLAGS) < 0)
|
||||
goto out;
|
||||
|
||||
mm->brk = brk;
|
||||
@@ -1203,7 +1204,8 @@ out:
|
||||
|
||||
int vm_brk_flags(unsigned long addr, unsigned long request, bool is_exec)
|
||||
{
|
||||
const vm_flags_t vm_flags = is_exec ? VM_EXEC : 0;
|
||||
const vma_flags_t vma_flags = is_exec ?
|
||||
mk_vma_flags(VMA_EXEC_BIT) : EMPTY_VMA_FLAGS;
|
||||
struct mm_struct *mm = current->mm;
|
||||
struct vm_area_struct *vma = NULL;
|
||||
unsigned long len;
|
||||
@@ -1230,7 +1232,7 @@ int vm_brk_flags(unsigned long addr, unsigned long request, bool is_exec)
|
||||
goto munmap_failed;
|
||||
|
||||
vma = vma_prev(&vmi);
|
||||
ret = do_brk_flags(&vmi, vma, addr, len, vm_flags);
|
||||
ret = do_brk_flags(&vmi, vma, addr, len, vma_flags);
|
||||
populate = ((mm->def_flags & VM_LOCKED) != 0);
|
||||
mmap_write_unlock(mm);
|
||||
userfaultfd_unmap_complete(mm, &uf);
|
||||
@@ -1328,12 +1330,13 @@ destroy:
|
||||
* Return true if the calling process may expand its vm space by the passed
|
||||
* number of pages
|
||||
*/
|
||||
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
|
||||
bool may_expand_vm(struct mm_struct *mm, const vma_flags_t *vma_flags,
|
||||
unsigned long npages)
|
||||
{
|
||||
if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
|
||||
return false;
|
||||
|
||||
if (is_data_mapping(flags) &&
|
||||
if (is_data_mapping_vma_flags(vma_flags) &&
|
||||
mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
|
||||
/* Workaround for Valgrind */
|
||||
if (rlimit(RLIMIT_DATA) == 0 &&
|
||||
|
||||
@@ -697,7 +697,8 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end, vm_flags_t newflags)
|
||||
{
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
vm_flags_t oldflags = READ_ONCE(vma->vm_flags);
|
||||
const vma_flags_t old_vma_flags = READ_ONCE(vma->flags);
|
||||
vma_flags_t new_vma_flags = legacy_to_vma_flags(newflags);
|
||||
long nrpages = (end - start) >> PAGE_SHIFT;
|
||||
unsigned int mm_cp_flags = 0;
|
||||
unsigned long charged = 0;
|
||||
@@ -706,7 +707,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
|
||||
if (vma_is_sealed(vma))
|
||||
return -EPERM;
|
||||
|
||||
if (newflags == oldflags) {
|
||||
if (vma_flags_same_pair(&old_vma_flags, &new_vma_flags)) {
|
||||
*pprev = vma;
|
||||
return 0;
|
||||
}
|
||||
@@ -717,8 +718,9 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
|
||||
* uncommon case, so doesn't need to be very optimized.
|
||||
*/
|
||||
if (arch_has_pfn_modify_check() &&
|
||||
(oldflags & (VM_PFNMAP|VM_MIXEDMAP)) &&
|
||||
(newflags & VM_ACCESS_FLAGS) == 0) {
|
||||
vma_flags_test_any(&old_vma_flags, VMA_PFNMAP_BIT,
|
||||
VMA_MIXEDMAP_BIT) &&
|
||||
!vma_flags_test_any_mask(&new_vma_flags, VMA_ACCESS_FLAGS)) {
|
||||
pgprot_t new_pgprot = vm_get_page_prot(newflags);
|
||||
|
||||
error = walk_page_range(current->mm, start, end,
|
||||
@@ -736,28 +738,31 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
|
||||
* hugetlb mapping were accounted for even if read-only so there is
|
||||
* no need to account for them here.
|
||||
*/
|
||||
if (newflags & VM_WRITE) {
|
||||
if (vma_flags_test(&new_vma_flags, VMA_WRITE_BIT)) {
|
||||
/* Check space limits when area turns into data. */
|
||||
if (!may_expand_vm(mm, newflags, nrpages) &&
|
||||
may_expand_vm(mm, oldflags, nrpages))
|
||||
if (!may_expand_vm(mm, &new_vma_flags, nrpages) &&
|
||||
may_expand_vm(mm, &old_vma_flags, nrpages))
|
||||
return -ENOMEM;
|
||||
if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
|
||||
VM_SHARED|VM_NORESERVE))) {
|
||||
if (!vma_flags_test_any(&old_vma_flags,
|
||||
VMA_ACCOUNT_BIT, VMA_WRITE_BIT, VMA_HUGETLB_BIT,
|
||||
VMA_SHARED_BIT, VMA_NORESERVE_BIT)) {
|
||||
charged = nrpages;
|
||||
if (security_vm_enough_memory_mm(mm, charged))
|
||||
return -ENOMEM;
|
||||
newflags |= VM_ACCOUNT;
|
||||
vma_flags_set(&new_vma_flags, VMA_ACCOUNT_BIT);
|
||||
}
|
||||
} else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
|
||||
!vma->anon_vma) {
|
||||
newflags &= ~VM_ACCOUNT;
|
||||
} else if (vma_flags_test(&old_vma_flags, VMA_ACCOUNT_BIT) &&
|
||||
vma_is_anonymous(vma) && !vma->anon_vma) {
|
||||
vma_flags_clear(&new_vma_flags, VMA_ACCOUNT_BIT);
|
||||
}
|
||||
|
||||
newflags = vma_flags_to_legacy(new_vma_flags);
|
||||
vma = vma_modify_flags(vmi, *pprev, vma, start, end, &newflags);
|
||||
if (IS_ERR(vma)) {
|
||||
error = PTR_ERR(vma);
|
||||
goto fail;
|
||||
}
|
||||
new_vma_flags = legacy_to_vma_flags(newflags);
|
||||
|
||||
*pprev = vma;
|
||||
|
||||
@@ -773,19 +778,24 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
|
||||
|
||||
change_protection(tlb, vma, start, end, mm_cp_flags);
|
||||
|
||||
if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT))
|
||||
if (vma_flags_test(&old_vma_flags, VMA_ACCOUNT_BIT) &&
|
||||
!vma_flags_test(&new_vma_flags, VMA_ACCOUNT_BIT))
|
||||
vm_unacct_memory(nrpages);
|
||||
|
||||
/*
|
||||
* Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
|
||||
* fault on access.
|
||||
*/
|
||||
if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
|
||||
(newflags & VM_WRITE)) {
|
||||
if (vma_flags_test(&new_vma_flags, VMA_WRITE_BIT)) {
|
||||
const vma_flags_t mask =
|
||||
vma_flags_and(&old_vma_flags, VMA_WRITE_BIT,
|
||||
VMA_SHARED_BIT, VMA_LOCKED_BIT);
|
||||
|
||||
if (vma_flags_same(&mask, VMA_LOCKED_BIT))
|
||||
populate_vma_page_range(vma, start, end, NULL);
|
||||
}
|
||||
|
||||
vm_stat_account(mm, oldflags, -nrpages);
|
||||
vm_stat_account(mm, vma_flags_to_legacy(old_vma_flags), -nrpages);
|
||||
vm_stat_account(mm, newflags, nrpages);
|
||||
perf_event_mmap(vma);
|
||||
return 0;
|
||||
|
||||
@@ -1472,10 +1472,10 @@ static unsigned long mremap_to(struct vma_remap_struct *vrm)
|
||||
|
||||
/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
|
||||
if (vrm->flags & MREMAP_DONTUNMAP) {
|
||||
vm_flags_t vm_flags = vrm->vma->vm_flags;
|
||||
vma_flags_t vma_flags = vrm->vma->flags;
|
||||
unsigned long pages = vrm->old_len >> PAGE_SHIFT;
|
||||
|
||||
if (!may_expand_vm(mm, vm_flags, pages))
|
||||
if (!may_expand_vm(mm, &vma_flags, pages))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@@ -1813,7 +1813,7 @@ static int check_prep_vma(struct vma_remap_struct *vrm)
|
||||
if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, vrm->delta))
|
||||
return -EAGAIN;
|
||||
|
||||
if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
|
||||
if (!may_expand_vm(mm, &vma->flags, vrm->delta >> PAGE_SHIFT))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
|
||||
34
mm/vma.c
34
mm/vma.c
@@ -2385,7 +2385,7 @@ static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
|
||||
|
||||
static void update_ksm_flags(struct mmap_state *map)
|
||||
{
|
||||
map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags);
|
||||
map->vma_flags = ksm_vma_flags(map->mm, map->file, map->vma_flags);
|
||||
}
|
||||
|
||||
static void set_desc_from_map(struct vm_area_desc *desc,
|
||||
@@ -2446,7 +2446,7 @@ static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
|
||||
}
|
||||
|
||||
/* Check against address space limit. */
|
||||
if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages))
|
||||
if (!may_expand_vm(map->mm, &map->vma_flags, map->pglen - vms->nr_pages))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Private writable mapping: check memory availability. */
|
||||
@@ -2866,20 +2866,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* do_brk_flags() - Increase the brk vma if the flags match.
|
||||
* @vmi: The vma iterator
|
||||
* @addr: The start address
|
||||
* @len: The length of the increase
|
||||
* @vma: The vma.
|
||||
* @vm_flags: The VMA Flags
|
||||
* @vma_flags: The VMA Flags
|
||||
*
|
||||
* Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
|
||||
* do not match then create a new anonymous VMA. Eventually we may be able to
|
||||
* do some brk-specific accounting here.
|
||||
*
|
||||
* Returns: %0 on success, or otherwise an error.
|
||||
*/
|
||||
int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
|
||||
unsigned long addr, unsigned long len, vm_flags_t vm_flags)
|
||||
unsigned long addr, unsigned long len, vma_flags_t vma_flags)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
|
||||
@@ -2887,9 +2889,12 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
|
||||
* Check against address space limits by the changed size
|
||||
* Note: This happens *after* clearing old mappings in some code paths.
|
||||
*/
|
||||
vm_flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
|
||||
vm_flags = ksm_vma_flags(mm, NULL, vm_flags);
|
||||
if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT))
|
||||
vma_flags_set_mask(&vma_flags, VMA_DATA_DEFAULT_FLAGS);
|
||||
vma_flags_set(&vma_flags, VMA_ACCOUNT_BIT);
|
||||
vma_flags_set_mask(&vma_flags, mm->def_vma_flags);
|
||||
|
||||
vma_flags = ksm_vma_flags(mm, NULL, vma_flags);
|
||||
if (!may_expand_vm(mm, &vma_flags, len >> PAGE_SHIFT))
|
||||
return -ENOMEM;
|
||||
|
||||
if (mm->map_count > get_sysctl_max_map_count())
|
||||
@@ -2903,7 +2908,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
|
||||
* occur after forking, so the expand will only happen on new VMAs.
|
||||
*/
|
||||
if (vma && vma->vm_end == addr) {
|
||||
VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr));
|
||||
VMG_STATE(vmg, mm, vmi, addr, addr + len, vma_flags, PHYS_PFN(addr));
|
||||
|
||||
vmg.prev = vma;
|
||||
/* vmi is positioned at prev, which this mode expects. */
|
||||
@@ -2924,8 +2929,8 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
|
||||
|
||||
vma_set_anonymous(vma);
|
||||
vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
|
||||
vm_flags_init(vma, vm_flags);
|
||||
vma->vm_page_prot = vm_get_page_prot(vm_flags);
|
||||
vma->flags = vma_flags;
|
||||
vma->vm_page_prot = vm_get_page_prot(vma_flags_to_legacy(vma_flags));
|
||||
vma_start_write(vma);
|
||||
if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
|
||||
goto mas_store_fail;
|
||||
@@ -2936,10 +2941,10 @@ out:
|
||||
perf_event_mmap(vma);
|
||||
mm->total_vm += len >> PAGE_SHIFT;
|
||||
mm->data_vm += len >> PAGE_SHIFT;
|
||||
if (vm_flags & VM_LOCKED)
|
||||
if (vma_flags_test(&vma_flags, VMA_LOCKED_BIT))
|
||||
mm->locked_vm += (len >> PAGE_SHIFT);
|
||||
if (pgtable_supports_soft_dirty())
|
||||
vm_flags_set(vma, VM_SOFTDIRTY);
|
||||
vma_set_flags(vma, VMA_SOFTDIRTY_BIT);
|
||||
return 0;
|
||||
|
||||
mas_store_fail:
|
||||
@@ -3070,7 +3075,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
|
||||
unsigned long new_start;
|
||||
|
||||
/* address space limit tests */
|
||||
if (!may_expand_vm(mm, vma->vm_flags, grow))
|
||||
if (!may_expand_vm(mm, &vma->flags, grow))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Stack limit test */
|
||||
@@ -3289,7 +3294,6 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
|
||||
{
|
||||
unsigned long charged = vma_pages(vma);
|
||||
|
||||
|
||||
if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
14
mm/vma.h
14
mm/vma.h
@@ -237,13 +237,13 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
|
||||
return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
|
||||
}
|
||||
|
||||
#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_) \
|
||||
#define VMG_STATE(name, mm_, vmi_, start_, end_, vma_flags_, pgoff_) \
|
||||
struct vma_merge_struct name = { \
|
||||
.mm = mm_, \
|
||||
.vmi = vmi_, \
|
||||
.start = start_, \
|
||||
.end = end_, \
|
||||
.vm_flags = vm_flags_, \
|
||||
.vma_flags = vma_flags_, \
|
||||
.pgoff = pgoff_, \
|
||||
.state = VMA_MERGE_START, \
|
||||
}
|
||||
@@ -465,7 +465,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
||||
struct list_head *uf);
|
||||
|
||||
int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
|
||||
unsigned long addr, unsigned long request, unsigned long flags);
|
||||
unsigned long addr, unsigned long request,
|
||||
vma_flags_t vma_flags);
|
||||
|
||||
unsigned long unmapped_area(struct vm_unmapped_area_info *info);
|
||||
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
|
||||
@@ -527,6 +528,13 @@ static inline bool is_data_mapping(vm_flags_t flags)
|
||||
return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
|
||||
}
|
||||
|
||||
static inline bool is_data_mapping_vma_flags(const vma_flags_t *vma_flags)
|
||||
{
|
||||
const vma_flags_t mask = vma_flags_and(vma_flags,
|
||||
VMA_WRITE_BIT, VMA_SHARED_BIT, VMA_STACK_BIT);
|
||||
|
||||
return vma_flags_same(&mask, VMA_WRITE_BIT);
|
||||
}
|
||||
|
||||
static inline void vma_iter_config(struct vma_iterator *vmi,
|
||||
unsigned long index, unsigned long last)
|
||||
|
||||
@@ -36,7 +36,8 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
|
||||
unsigned long new_start = old_start - shift;
|
||||
unsigned long new_end = old_end - shift;
|
||||
VMA_ITERATOR(vmi, mm, new_start);
|
||||
VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
|
||||
VMG_STATE(vmg, mm, &vmi, new_start, old_end, EMPTY_VMA_FLAGS,
|
||||
vma->vm_pgoff);
|
||||
struct vm_area_struct *next;
|
||||
struct mmu_gather tlb;
|
||||
PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length);
|
||||
@@ -135,7 +136,7 @@ int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
|
||||
* use STACK_TOP because that can depend on attributes which aren't
|
||||
* configured yet.
|
||||
*/
|
||||
BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
|
||||
VM_WARN_ON_ONCE(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
|
||||
vma->vm_end = STACK_TOP_MAX;
|
||||
vma->vm_start = vma->vm_end - PAGE_SIZE;
|
||||
if (pgtable_supports_soft_dirty())
|
||||
|
||||
@@ -7713,6 +7713,8 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
|
||||
|
||||
static __init int selinux_init(void)
|
||||
{
|
||||
vma_flags_t data_default_flags = VMA_DATA_DEFAULT_FLAGS;
|
||||
|
||||
pr_info("SELinux: Initializing.\n");
|
||||
|
||||
memset(&selinux_state, 0, sizeof(selinux_state));
|
||||
@@ -7729,7 +7731,7 @@ static __init int selinux_init(void)
|
||||
AUDIT_CFG_LSM_SECCTX_SUBJECT |
|
||||
AUDIT_CFG_LSM_SECCTX_OBJECT);
|
||||
|
||||
default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC);
|
||||
default_noexec = !vma_flags_test(&data_default_flags, VMA_EXEC_BIT);
|
||||
if (!default_noexec)
|
||||
pr_notice("SELinux: virtual memory is executable by default\n");
|
||||
|
||||
|
||||
@@ -95,6 +95,3 @@ static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
|
||||
{
|
||||
return PAGE_SIZE;
|
||||
}
|
||||
|
||||
#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
|
||||
VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
|
||||
|
||||
@@ -314,28 +314,34 @@ enum {
|
||||
/* Bits set in the VMA until the stack is in its final location */
|
||||
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
|
||||
|
||||
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
|
||||
#define TASK_EXEC_BIT ((current->personality & READ_IMPLIES_EXEC) ? \
|
||||
VM_EXEC_BIT : VM_READ_BIT)
|
||||
|
||||
/* Common data flag combinations */
|
||||
#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
|
||||
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
||||
#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
|
||||
VM_MAYWRITE | VM_MAYEXEC)
|
||||
#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
|
||||
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
||||
#define VMA_DATA_FLAGS_TSK_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
|
||||
TASK_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
|
||||
VMA_MAYEXEC_BIT)
|
||||
#define VMA_DATA_FLAGS_NON_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
|
||||
VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, VMA_MAYEXEC_BIT)
|
||||
#define VMA_DATA_FLAGS_EXEC mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, \
|
||||
VMA_EXEC_BIT, VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT, \
|
||||
VMA_MAYEXEC_BIT)
|
||||
|
||||
#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
|
||||
#ifndef VMA_DATA_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_EXEC
|
||||
#endif
|
||||
|
||||
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
|
||||
#ifndef VMA_STACK_DEFAULT_FLAGS /* arch can override this */
|
||||
#define VMA_STACK_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS
|
||||
#endif
|
||||
|
||||
#define VMA_STACK_FLAGS append_vma_flags(VMA_STACK_DEFAULT_FLAGS, \
|
||||
VMA_STACK_BIT, VMA_ACCOUNT_BIT)
|
||||
/* Temporary until VMA flags conversion complete. */
|
||||
#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)
|
||||
|
||||
#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
|
||||
|
||||
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
|
||||
|
||||
/* VMA basic access permission flags */
|
||||
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
|
||||
#define VMA_ACCESS_FLAGS mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_EXEC_BIT)
|
||||
@@ -345,6 +351,9 @@ enum {
|
||||
*/
|
||||
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
|
||||
|
||||
#define VMA_SPECIAL_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_DONTEXPAND_BIT, \
|
||||
VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT)
|
||||
|
||||
#define VMA_REMAP_FLAGS mk_vma_flags(VMA_IO_BIT, VMA_PFNMAP_BIT, \
|
||||
VMA_DONTEXPAND_BIT, VMA_DONTDUMP_BIT)
|
||||
|
||||
@@ -357,11 +366,6 @@ enum {
|
||||
/* This mask represents all the VMA flag bits used by mlock */
|
||||
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
|
||||
|
||||
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
|
||||
|
||||
#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
|
||||
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
||||
|
||||
#define RLIMIT_STACK 3 /* max stack size */
|
||||
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
|
||||
|
||||
|
||||
@@ -101,10 +101,10 @@ static inline bool shmem_file(struct file *file)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
|
||||
const struct file *file, vm_flags_t vm_flags)
|
||||
static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
|
||||
const struct file *file, vma_flags_t vma_flags)
|
||||
{
|
||||
return vm_flags;
|
||||
return vma_flags;
|
||||
}
|
||||
|
||||
static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
|
||||
@@ -239,7 +239,8 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
|
||||
static inline bool may_expand_vm(struct mm_struct *mm,
|
||||
const vma_flags_t *vma_flags,
|
||||
unsigned long npages)
|
||||
{
|
||||
return true;
|
||||
|
||||
@@ -1429,11 +1429,10 @@ static bool test_expand_only_mode(void)
|
||||
{
|
||||
vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
|
||||
VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
|
||||
vm_flags_t legacy_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
|
||||
struct mm_struct mm = {};
|
||||
VMA_ITERATOR(vmi, &mm, 0);
|
||||
struct vm_area_struct *vma_prev, *vma;
|
||||
VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, legacy_flags, 5);
|
||||
VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vma_flags, 5);
|
||||
|
||||
/*
|
||||
* Place a VMA prior to the one we're expanding so we assert that we do
|
||||
|
||||
Reference in New Issue
Block a user