mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
Reduce 22 declarations of empty_zero_page to 3 and 23 declarations of ZERO_PAGE() to 4. Every architecture defines empty_zero_page one way or another, but for most of them it is always a page aligned page in BSS and most definitions of ZERO_PAGE do virt_to_page(empty_zero_page). Move Linus vetted x86 definition of empty_zero_page and ZERO_PAGE() to the core MM and drop these definitions in architectures that do not implement colored zero page (MIPS and s390). ZERO_PAGE() remains a macro because turning it to a wrapper for a static inline causes severe pain in header dependencies. For the most part the change is mechanical, with these being noteworthy: * alpha: aliased empty_zero_page with ZERO_PGE that was also used for boot parameters. Switching to a generic empty_zero_page removes the aliasing and keeps ZERO_PGE for boot parameters only * arm64: uses __pa_symbol() in ZERO_PAGE() so that definition of ZERO_PAGE() is kept intact. * m68k/parisc/um: allocated empty_zero_page from memblock, although they do not support zero page coloring and having it in BSS will work fine. * sparc64 can have empty_zero_page in BSS rather than allocate it, but it can't use virt_to_page() for BSS. Keep its definition of ZERO_PAGE() but instead of allocating it, make mem_map_zero point to empty_zero_page. * sh: used empty_zero_page for boot parameters at the very early boot. Rename the parameters page to boot_params_page and let sh use the generic empty_zero_page. * hexagon: had an amusing comment about empty_zero_page /* A handy thing to have if one has the RAM. 
Declared in head.S */ that unfortunately had to go :) Link: https://lkml.kernel.org/r/20260211103141.3215197-4-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Acked-by: Helge Deller <deller@gmx.de> [parisc] Tested-by: Helge Deller <deller@gmx.de> [parisc] Reviewed-by: Christophe Leroy (CS GROUP) <chleroy@kernel.org> Acked-by: Dave Hansen <dave.hansen@linux.intel.com> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Magnus Lindholm <linmag7@gmail.com> [alpha] Acked-by: Dinh Nguyen <dinguyen@kernel.org> [nios2] Acked-by: Andreas Larsson <andreas@gaisler.com> [sparc] Acked-by: David Hildenbrand (Arm) <david@kernel.org> Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: David S. Miller <davem@davemloft.net> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Guo Ren <guoren@kernel.org> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Michal Simek <monstr@monstr.eu> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
179 lines
4.9 KiB
C
179 lines
4.9 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/memblock.h>
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
#include <linux/initrd.h>
|
|
#endif
|
|
#include <linux/of_fdt.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/module.h>
|
|
#include <linux/highmem.h>
|
|
#include <asm/page.h>
|
|
#include <asm/sections.h>
|
|
#include <asm/setup.h>
|
|
#include <asm/arcregs.h>
|
|
|
|
/* Kernel page tables for the swapper (init) address space */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);

/* Low memory base is fixed at build time; size comes from DT or "mem=" */
static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
static unsigned long low_mem_sz;

#ifdef CONFIG_HIGHMEM
/* High-mem pfn range, filled in setup_arch_memory() from the second DT bank */
static unsigned long min_high_pfn, max_high_pfn;
static phys_addr_t high_mem_start;
static phys_addr_t high_mem_sz;
/*
 * First valid pfn: min of the low/high pfn bases, since on ARC the
 * highmem bank may sit at lower addresses than lowmem (see the comment
 * in setup_arch_memory()).
 */
unsigned long arch_pfn_offset;
EXPORT_SYMBOL(arch_pfn_offset);
#endif
|
|
|
|
/*
 * Return the size of low memory, as established by DT parsing
 * (early_init_dt_add_memory_arch) or a "mem=" cmdline override.
 */
long __init arc_get_mem_sz(void)
{
	return low_mem_sz;
}
|
|
|
|
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
|
|
static int __init setup_mem_sz(char *str)
|
|
{
|
|
low_mem_sz = memparse(str, NULL) & PAGE_MASK;
|
|
|
|
/* early console might not be setup yet - it will show up later */
|
|
pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
|
|
|
|
return 0;
|
|
}
|
|
early_param("mem", setup_mem_sz);
|
|
|
|
/*
 * Called by the FDT core for each memory region in the device tree.
 *
 * The first region must start at CONFIG_LINUX_RAM_BASE and becomes low
 * memory (numa node 0). Any subsequent region is treated as high memory
 * (node 1) when CONFIG_HIGHMEM is enabled, and is ignored otherwise.
 */
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	int in_use = 0;

	if (!low_mem_sz) {
		/* First bank: must be the low-mem region at the RAM base */
		if (base != low_mem_start)
			panic("CONFIG_LINUX_RAM_BASE != DT memory { }");

		low_mem_sz = size;
		in_use = 1;
		memblock_add_node(base, size, 0, MEMBLOCK_NONE);
	} else {
#ifdef CONFIG_HIGHMEM
		high_mem_start = base;
		high_mem_sz = size;
		in_use = 1;
		memblock_add_node(base, size, 1, MEMBLOCK_NONE);
		/* Kept reserved for now; freed back in arch_mm_preinit() */
		memblock_reserve(base, size);
#endif
	}

	pr_info("Memory @ %llx [%lldM] %s\n",
		base, TO_MB(size), !in_use ? "Not used":"");
}
|
|
|
|
/* Report the highest pfn of each zone to the core mm zone setup */
void __init arch_zone_limits_init(unsigned long *max_zone_pfn)
{
	/*----------------- node/zones setup --------------------------*/
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_HIGHMEM
	/*
	 * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
	 * For HIGHMEM without PAE max_high_pfn should be less than
	 * min_low_pfn to guarantee that these two regions don't overlap.
	 * For PAE case highmem is greater than lowmem, so it is natural
	 * to use max_high_pfn.
	 *
	 * In both cases, holes should be handled by pfn_valid().
	 */
	max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
#endif
}
|
|
|
|
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* first page of system - kernel .vector starts here */
	min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);

	/* Last usable page of low mem */
	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);

	/*------------- bootmem allocator setup -----------------------*/

	/*
	 * seed the bootmem allocator after any DT memory node parsing or
	 * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
	 *
	 * Only low mem is added, otherwise we have crashes when allocating
	 * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
	 * avail memory, ending in highmem with a > 32-bit address. However
	 * it then tries to memset it with a truncated 32-bit handle, causing
	 * the crash
	 */

	/* Keep the kernel image [LINK_BASE.._end] out of the allocator */
	memblock_reserve(CONFIG_LINUX_LINK_BASE,
			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

#ifdef CONFIG_BLK_DEV_INITRD
	/* Protect the initrd image and publish its virtual address range */
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);
		initrd_start = (unsigned long)__va(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	/* Reserve the FDT blob itself and regions it declares reserved */
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	memblock_dump_all();

#ifdef CONFIG_HIGHMEM
	/*
	 * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
	 * than addresses in normal aka low memory (0x8000_0000 based).
	 * Even with PAE, the huge peripheral space hole would waste a lot of
	 * mem with single contiguous mem_map[].
	 * Thus when HIGHMEM on ARC is enabled the memory map corresponding
	 * to the hole is freed and ARC specific version of pfn_valid()
	 * handles the hole in the memory map.
	 */

	min_high_pfn = PFN_DOWN(high_mem_start);
	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);

	/* pfn numbering starts at whichever bank sits lower */
	arch_pfn_offset = min(min_low_pfn, min_high_pfn);

	kmap_init();
#endif /* CONFIG_HIGHMEM */
}
|
|
|
|
/*
 * Arch hook run before core mm init: release the high-mem region that
 * early_init_dt_add_memory_arch() kept memblock_reserve()d, and
 * compile-time check that every page-table level fits in one page.
 */
void __init arch_mm_preinit(void)
{
#ifdef CONFIG_HIGHMEM
	memblock_phys_free(high_mem_start, high_mem_sz);
#endif

	BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
	BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
	BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
	BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
}
|
|
|
|
#ifdef CONFIG_HIGHMEM
/*
 * With HIGHMEM the memory map has a hole between the low-mem and
 * high-mem banks (see setup_arch_memory()), so a pfn is valid iff it
 * falls inside either bank's [min..max] range.
 */
int pfn_valid(unsigned long pfn)
{
	if (pfn >= min_low_pfn && pfn <= max_low_pfn)
		return 1;

	return pfn >= min_high_pfn && pfn <= max_high_pfn;
}
EXPORT_SYMBOL(pfn_valid);
#endif
|