In order to be able to do this, we need to change VM_DATA_DEFAULT_FLAGS and friends and update the architecture-specific definitions also. We then have to update some KSM logic to handle VMA flags, and introduce VMA_STACK_FLAGS to define the vma_flags_t equivalent of VM_STACK_FLAGS.

We also introduce two helper functions for use while we convert legacy flags to vma_flags_t values - vma_flags_to_legacy() and legacy_to_vma_flags(). This lets us break the conversion up into separate, iterative parts. We use these explicitly here to keep VM_STACK_FLAGS around for certain users which need to maintain the legacy vm_flags_t values for the time being.

We are no longer able to rely on the simple VM_xxx value being set to zero if the feature is not enabled, so in the case of VM_DROPPABLE we introduce VMA_DROPPABLE as the vma_flags_t equivalent, which is set to EMPTY_VMA_FLAGS if the droppable flag is not available.

While we're here, we make the description of do_brk_flags() into a kdoc comment, as it almost was already.

We use vma_flags_to_legacy() so that we do not need to update the vm_get_page_prot() logic at this time.

Note that in create_init_stack_vma() we have to replace the BUILD_BUG_ON() with a VM_WARN_ON_ONCE(), as the tested values are no longer available at build time.

We also update mprotect_fixup() to use VMA flags where possible, though we have to live with a little duplication between vm_flags_t and vma_flags_t values for the time being, until further conversions are made.

While we're here, update VM_SPECIAL to be defined in terms of VMA_SPECIAL_FLAGS now that we have vma_flags_to_legacy().

Finally, we update the VMA tests to reflect these changes.

Link: https://lkml.kernel.org/r/d02e3e45d9a33d7904b149f5604904089fd640ae.1774034900.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Paul Moore <paul@paul-moore.com> [SELinux]
Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Kees Cook <kees@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stephen Smalley <stephen.smalley.work@gmail.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
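The conversion helpers are only named above, not shown. As a rough, standalone sketch of the idea (every type, width, and bit layout below is a stand-in, not the kernel's actual definition), a wider vma_flags_t whose low word mirrors the legacy vm_flags_t layout permits lossless round trips through vma_flags_to_legacy() and legacy_to_vma_flags(), which is the property an iterative, call-site-by-call-site conversion relies on:

/*
 * Standalone sketch only - NOT the kernel implementation. Models how
 * helpers bridging a legacy vm_flags_t bitmask and a hypothetical wider
 * vma_flags_t type might behave during an incremental conversion.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_flags_t;			/* legacy flag word */
typedef struct { uint64_t bits[2]; } vma_flags_t;	/* hypothetical wider type */

#define EMPTY_VMA_FLAGS ((vma_flags_t){ { 0, 0 } })

/* Assumption: the low 64 bits of vma_flags_t mirror the legacy layout. */
static vm_flags_t vma_flags_to_legacy(vma_flags_t flags)
{
	return (vm_flags_t)flags.bits[0];
}

static vma_flags_t legacy_to_vma_flags(vm_flags_t flags)
{
	vma_flags_t ret = EMPTY_VMA_FLAGS;

	ret.bits[0] = flags;
	return ret;
}

int main(void)
{
	vm_flags_t legacy = 0x8000007d;		/* arbitrary example value */
	vma_flags_t wide = legacy_to_vma_flags(legacy);

	printf("round trip ok: %d\n", vma_flags_to_legacy(wide) == legacy);
	return 0;
}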
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>
#include <asm/addrspace.h>

#include <vdso/page.h>

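/*
 * A huge page spans as much memory as one full page table maps:
 * PAGE_SIZE / sizeof(pte_t) base pages, e.g. 2 MiB with 4 KiB base
 * pages on 64-bit (PTRLOG == 3).
 */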
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - PTRLOG)
#define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLER__

#include <linux/kernel.h>
#include <linux/pfn.h>

/*
 * It's normally defined only for FLATMEM config but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)

extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

extern unsigned long shm_align_mask;

struct page;
struct vm_area_struct;
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
typedef struct page *pgtable_t;

typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

/*
 * __pa()/__va() should be used only during mem init.
 */
#define __pa(x)		PHYSADDR(x)
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

struct page *dmw_virt_to_page(unsigned long kaddr);
struct page *tlb_virt_to_page(unsigned long kaddr);

#define pfn_to_phys(pfn)	__pfn_to_phys(pfn)
#define phys_to_pfn(paddr)	__phys_to_pfn(paddr)

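/*
 * With KFENCE the pool may sit outside the direct-mapped window (DMW),
 * so virt/page conversions cannot assume a linear mapping and have to
 * check at run time which mapping an address uses.
 */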
#ifndef CONFIG_KFENCE

#define page_to_virt(page)	__va(page_to_phys(page))
#define virt_to_page(kaddr)	phys_to_page(__pa(kaddr))

#else

#define WANT_PAGE_VIRTUAL

#define page_to_virt(page)								\
({											\
	extern char *__kfence_pool;							\
	(__kfence_pool == NULL) ? __va(page_to_phys(page)) : page_address(page);	\
})

#define virt_to_page(kaddr)								\
({											\
	(likely((unsigned long)kaddr < vm_map_base)) ?					\
	dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
})

#endif

#define pfn_to_virt(pfn)	page_to_virt(pfn_to_page(pfn))
#define virt_to_pfn(kaddr)	page_to_pfn(virt_to_page(kaddr))

extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((volatile void *)(kaddr))

#define VMA_DATA_DEFAULT_FLAGS	VMA_DATA_FLAGS_TSK_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* !__ASSEMBLER__ */

#endif /* _ASM_PAGE_H */