mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
In order to be able to do this, we need to change VM_DATA_DEFAULT_FLAGS and friends and update the architecture-specific definitions also. We then have to update some KSM logic to handle VMA flags, and introduce VMA_STACK_FLAGS to define the vma_flags_t equivalent of VM_STACK_FLAGS. We also introduce two helper functions for use during the time we are converting legacy flags to vma_flags_t values - vma_flags_to_legacy() and legacy_to_vma_flags(). This enables us to break these changes up into separate, iterative parts. We use these explicitly here to keep VM_STACK_FLAGS around for certain users which need to maintain the legacy vm_flags_t values for the time being. We are no longer able to rely on the simple VM_xxx being set to zero if the feature is not enabled, so in the case of VM_DROPPABLE we introduce VMA_DROPPABLE as the vma_flags_t equivalent, which is set to EMPTY_VMA_FLAGS if the droppable flag is not available. While we're here, we make the description of do_brk_flags() into a kdoc comment, as it almost was already. We use vma_flags_to_legacy() to not need to update the vm_get_page_prot() logic at this time. Note that in create_init_stack_vma() we have to replace the BUILD_BUG_ON() with a VM_WARN_ON_ONCE() as the tested values are no longer available at build time. We also update mprotect_fixup() to use VMA flags where possible, though we have to live with a little duplication between vm_flags_t and vma_flags_t values for the time being until further conversions are made. While we're here, update VM_SPECIAL to be defined in terms of VMA_SPECIAL_FLAGS now we have vma_flags_to_legacy(). Finally, we update the VMA tests to reflect these changes. 
Link: https://lkml.kernel.org/r/d02e3e45d9a33d7904b149f5604904089fd640ae.1774034900.git.ljs@kernel.org Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Acked-by: Paul Moore <paul@paul-moore.com> [SELinux] Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chengming Zhou <chengming.zhou@linux.dev> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christian Brauner <brauner@kernel.org> Cc: David Hildenbrand <david@kernel.org> Cc: Dinh Nguyen <dinguyen@kernel.org> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Jann Horn <jannh@google.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Kees Cook <kees@kernel.org> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Ondrej Mosnacek <omosnace@redhat.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Pedro Falcato <pfalcato@suse.de> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Stephen Smalley <stephen.smalley.work@gmail.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Will Deacon <will@kernel.org> Cc: xu xin <xu.xin16@zte.com.cn> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
435 lines
7.9 KiB
C
435 lines
7.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0+ */
|
|
|
|
#pragma once
|
|
|
|
/*
 * Contains declarations that are STUBBED, that is that are rendered no-ops, in
 * order to facilitate userland VMA testing.
 */
|
|
|
|
/* Forward declarations. */
|
|
struct mm_struct;
|
|
struct vm_area_struct;
|
|
struct vm_area_desc;
|
|
struct pagetable_move_control;
|
|
struct mmap_action;
|
|
struct file;
|
|
struct anon_vma;
|
|
struct anon_vma_chain;
|
|
struct address_space;
|
|
struct unmap_desc;
|
|
|
|
/* Kernel annotation/hardening attributes are meaningless in userland builds. */
#define __bitwise
#define __randomize_layout

/* The userland tests treat the entire address space as mappable. */
#define FIRST_USER_ADDRESS 0UL
#define USER_PGTABLES_CEILING 0UL

/* NUMA memory policy is not modelled; every VMA reports no policy. */
#define vma_policy(vma) NULL

/*
 * Locking is a no-op in the single-threaded harness.  Expand to an empty
 * do/while so the macro remains safe in unbraced if/else statements.
 */
#define down_write_nest_lock(sem, nest_lock) do {} while (0)

/*
 * KCSAN is unavailable in userland; just evaluate the expression.
 * The expansion is parenthesised so operators surrounding a data_race()
 * use cannot capture part of @expr via precedence.
 */
#define data_race(expr) (expr)

/* KCSAN assertion: no-op in userland, kept statement-safe. */
#define ASSERT_EXCLUSIVE_WRITER(x) do {} while (0)
|
|
|
|
/*
 * Minimal empty stand-ins for kernel types that the code under test only
 * passes around by value or pointer, never inspects.
 */
struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
struct vm_fault {};
|
|
|
|
/* Stub: userfaultfd unmap notification is not modelled in userland tests. */
static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
		struct list_head *uf)
{
}

/* Stub: report zero bytes of page tables moved. */
static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

/* Stub: there are no real page tables to free in userland. */
static inline void free_pgd_range(struct mmu_gather *tlb,
		unsigned long addr, unsigned long end,
		unsigned long floor, unsigned long ceiling)
{
}
|
|
|
|
/* Stub: KSM is not modelled; execve-time KSM setup always succeeds. */
static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

/* Stub: no KSM state to tear down on mm exit. */
static inline void ksm_exit(struct mm_struct *mm)
{
}

/* Stubs: NUMA auto-balancing state is not modelled. */
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

/* Stubs: anon VMA names are not modelled; nothing to copy or free. */
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
		struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}
|
|
|
|
/* Stub: mmap action preparation is a no-op in userland tests. */
static inline void mmap_action_prepare(struct mmap_action *action,
		struct vm_area_desc *desc)
{
}

/* Stub: report successful completion of the mmap action. */
static inline int mmap_action_complete(struct mmap_action *action,
		struct vm_area_struct *vma)
{
	return 0;
}

/* Stub: hugetlb reservations are not modelled. */
static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

/* Stub: no file is treated as shmem-backed in userland tests. */
static inline bool shmem_file(struct file *file)
{
	return false;
}
|
|
|
|
/* Stub: KSM never adjusts flags here; pass the flags through unchanged. */
static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
		const struct file *file, vma_flags_t vma_flags)
{
	return vma_flags;
}

/* Stubs: PFN remapping is not modelled; completion trivially succeeds. */
static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
{
}

static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
	return 0;
}
|
|
|
|
/*
 * Stub: pretend the unmap always succeeds.
 *
 * The first three parameters were previously unnamed; unnamed parameters in a
 * function *definition* are only valid from C23 onwards, so name them for
 * portability to older compilers.
 */
static inline int do_munmap(struct mm_struct *mm, unsigned long start,
		size_t len, struct list_head *uf)
{
	return 0;
}
|
|
|
|
/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);

/* Stub: the mmap_lock is not modelled, so the assertion is vacuous. */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
}
|
|
|
|
|
|
/* Stubs: anon_vma/i_mmap locking is a no-op in the single-threaded harness. */
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

/* Stub: userfaultfd is not modelled; unmap preparation trivially succeeds. */
static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		struct list_head *unmaps)
{
	return 0;
}
|
|
|
|
/* Stubs: mmap_lock operations are no-ops in the single-threaded harness. */
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

/* Stub: lock acquisition always succeeds (the task is never killed). */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}
|
|
|
|
/* Stub: all address-space modifications are permitted in the tests. */
static inline bool can_modify_mm(struct mm_struct *mm,
		unsigned long start,
		unsigned long end)
{
	return true;
}

/* Stub: no architecture-specific unmap hook in userland. */
static inline void arch_unmap(struct mm_struct *mm,
		unsigned long start,
		unsigned long end)
{
}

/*
 * Stub: mempolicies are empty stand-ins, so all policies compare equal
 * (this keeps VMA merging permissive in the tests).
 */
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}
|
|
|
|
/* Stub: khugepaged is not modelled. */
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
		vm_flags_t vm_flags)
{
}

/* Stub: treat every mapping as capable of writeback. */
static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

/* Stub: hugetlb VMAs are not modelled. */
static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

/* Stub: soft-dirty tracking is not modelled. */
static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

/* Stub: userfaultfd write-protect is not modelled. */
static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}
|
|
|
|
/* Stub: vacuous — the mmap_lock is not modelled. */
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

/* Stubs: mutexes are no-ops in the single-threaded harness. */
static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

/*
 * Report "locked" unconditionally — presumably so that lock-held assertions
 * in the code under test pass; confirm against callers if this changes.
 */
static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

/* Stub: no signals are ever pending in the test harness. */
static inline bool signal_pending(void *p)
{
	return false;
}
|
|
|
|
/* Stub: hugetlbfs files are not modelled. */
static inline bool is_file_hugepages(struct file *file)
{
	return false;
}

/* Stub: the security/overcommit accounting check always succeeds. */
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

/* Stub: address-space limits are never exceeded in the tests. */
static inline bool may_expand_vm(struct mm_struct *mm,
		const vma_flags_t *vma_flags,
		unsigned long npages)
{
	return true;
}

/* Stub: shmem-backed zero mappings are not modelled; report success. */
static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}
|
|
|
|
|
|
/* Stub definition matching the earlier declaration; no accounting is done. */
static inline void vm_acct_memory(long pages)
{
}

/* Stubs: the file-backed VMA interval tree is not maintained in userland. */
static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
		struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
		struct rb_root_cached *rb)
{
}

/* Stub: no dcache flushing is required in userland. */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
|
|
|
|
/* Stubs: the anon_vma interval tree is not maintained in userland. */
static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
		struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
		struct rb_root_cached *rb)
{
}

/* Stubs: uprobes are not modelled. */
static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
}
|
|
|
|
/* Stubs: address-space and anon_vma write locking are no-ops in userland. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

/* Stub: vacuous — per-VMA locks are not modelled. */
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}

/* Stub: KSM is not modelled. */
static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

/* Stub: perf mmap events are not emitted in userland tests. */
static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}
|
|
|
|
/* Stub: DAX mappings are not modelled. */
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

/* Stub: there is no gate VMA in the userland model. */
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

/* Stub: every flag combination is considered architecturally valid. */
static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

/* Stub: no vm_ops->close() hooks to invoke. */
static inline void vma_close(struct vm_area_struct *vma)
{
}

/* Stub: file-backed mmap hooks always succeed. */
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}
|
|
|
|
/* Stub: hugepage-only address ranges are not modelled. */
static inline int is_hugepage_only_range(struct mm_struct *mm,
		unsigned long addr, unsigned long len)
{
	return 0;
}

/* Stub: the test process is treated as having every capability. */
static inline bool capable(int cap)
{
	return true;
}

/* Stub: anon VMA names are not modelled; no VMA has one. */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

/* Stub: userfaultfd contexts always compare mergeable. */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
		struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

/* Stub: anon VMA names always compare equal, so they never block merging. */
static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
		struct anon_vma_name *anon_name2)
{
	return true;
}
|
|
|
|
/* Stub: there is no scheduler model, so nothing to check. */
static inline void might_sleep(void)
{
}

/* Stub: file reference counting is not modelled; nothing to drop. */
static inline void fput(struct file *file)
{
}

/* Stub: mempolicy reference counting is not modelled. */
static inline void mpol_put(struct mempolicy *pol)
{
}

/* Stub: there are no LRU pagevecs to drain in userland. */
static inline void lru_add_drain(void)
{
}

/* Stub: TLB gathering is not modelled. */
static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

/* Stubs: high-watermark RSS/VM statistics are not tracked. */
static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}
|
|
|
|
/* Stubs: there are no real mappings to unmap nor page tables to free. */
static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

/* Stub: writable-mapping accounting is not modelled. */
static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

/* Stub: no dcache flushing is required in userland. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

/* Stub: TLB teardown is not modelled. */
static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

/* Stub: reference counting is not modelled; hand back the same file. */
static inline struct file *get_file(struct file *f)
{
	return f;
}
|
|
|
|
/* Stub: mempolicies are not modelled; duplication trivially succeeds. */
static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

/* Stub: THP is not modelled, so no huge-page boundary adjustment is needed. */
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		struct vm_area_struct *next)
{
}
|
|
|
|
/*
 * Stub: hugetlb VMAs are not modelled, so splitting is a no-op.
 *
 * The parameters were previously unnamed; unnamed parameters in a function
 * *definition* are only valid from C23 onwards, so name them for portability.
 */
static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
{
}
|
|
|
|
/* Stub: mlock() support is not modelled; no VMA reports supporting it. */
static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
{
	return false;
}
|