Merge tag 'riscv-for-linus-6.11-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:

 - Support for various new ISA extensions:
     * The Zve32[xf] and Zve64[xfd] sub-extensions of the vector
       extension
     * Zimop and Zcmop for may-be-operations
     * The Zca, Zcf, Zcd and Zcb sub-extensions of the C extension
     * Zawrs (an illustrative spin-wait sketch follows this list)

 - The riscv,cpu-intc device tree binding has been converted to dtschema

 - A handful of performance improvements and cleanups to text patching

 - Support for memory hot{,un}plug

 - The highest user-allocatable virtual address is now visible in
   hwprobe (an illustrative probe follows this list)
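
As a rough illustration (not part of this merge), userspace might query the
new key along these lines, assuming v6.11 UAPI headers that provide
RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS in <asm/hwprobe.h>; hwprobe reports an
unsupported key by setting the key field to -1:

    #include <asm/hwprobe.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct riscv_hwprobe pair = {
                    .key = RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS,
            };

            /* cpusetsize == 0 and cpus == NULL: query all online CPUs */
            if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0 ||
                pair.key < 0) {
                    fprintf(stderr, "hwprobe key not supported\n");
                    return 1;
            }

            printf("highest user VA: 0x%llx\n",
                   (unsigned long long)pair.value);
            return 0;
    }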

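For flavor, the Zawrs spin-wait idea looks roughly like the sketch below (a
minimal illustration, not the kernel's implementation): lr registers a
reservation on the lock word, and wrs.nto then stalls the hart until that
reservation is broken by a remote write (or an interrupt fires), instead of
burning power in a pause loop. This assumes rv64 and a toolchain whose
-march string includes _zawrs; the function name is made up for the example:

    /* Wait until *addr changes from 'old'; returns the new value. */
    static inline unsigned long wait_until_changed(unsigned long *addr,
                                                   unsigned long old)
    {
            unsigned long val;

            for (;;) {
                    /* lr.d loads *addr and registers a reservation */
                    __asm__ __volatile__("lr.d %0, (%1)"
                                         : "=r" (val)
                                         : "r" (addr)
                                         : "memory");
                    if (val != old)
                            return val;
                    /* wrs.nto: wait on reservation set, no timeout */
                    __asm__ __volatile__("wrs.nto" ::: "memory");
            }
    }
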
* tag 'riscv-for-linus-6.11-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (58 commits)
  riscv: lib: relax assembly constraints in hweight
  riscv: set trap vector earlier
  KVM: riscv: selftests: Add Zawrs extension to get-reg-list test
  KVM: riscv: Support guest wrs.nto
  riscv: hwprobe: export Zawrs ISA extension
  riscv: Add Zawrs support for spinlocks
  dt-bindings: riscv: Add Zawrs ISA extension description
  riscv: Provide a definition for 'pause'
  riscv: hwprobe: export highest virtual userspace address
  riscv: Improve sbi_ecall() code generation by reordering arguments
  riscv: Add tracepoints for SBI calls and returns
  riscv: Optimize crc32 with Zbc extension
  riscv: Enable DAX VMEMMAP optimization
  riscv: mm: Add support for ZONE_DEVICE
  virtio-mem: Enable virtio-mem for RISC-V
  riscv: Enable memory hotplugging for RISC-V
  riscv: mm: Take memory hotplug read-lock during kernel page table dump
  riscv: mm: Add memory hotplugging support
  riscv: mm: Add pfn_to_kaddr() implementation
  riscv: mm: Refactor create_linear_mapping_range() for memory hot add
  ...
Author: Linus Torvalds
Date:   2024-07-20 09:11:27 -07:00

 54 files changed, 1643 insertions(+), 326 deletions(-)

diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c

@@ -28,6 +28,7 @@
 #include <asm/fixmap.h>
 #include <asm/io.h>
 #include <asm/kasan.h>
+#include <asm/numa.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
@@ -296,7 +297,7 @@ static void __init setup_bootmem(void)
 }
 
 #ifdef CONFIG_MMU
-struct pt_alloc_ops pt_ops __initdata;
+struct pt_alloc_ops pt_ops __meminitdata;
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
@@ -358,7 +359,7 @@ static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
 	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
 }
 
-static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
+static inline pte_t *__meminit get_pte_virt_late(phys_addr_t pa)
 {
 	return (pte_t *) __va(pa);
 }
@@ -377,7 +378,7 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t __init alloc_pte_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pte_late(uintptr_t va)
 {
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
 
@@ -385,9 +386,8 @@ static phys_addr_t __init alloc_pte_late(uintptr_t va)
 	return __pa((pte_t *)ptdesc_address(ptdesc));
 }
 
-static void __init create_pte_mapping(pte_t *ptep,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pte_mapping(pte_t *ptep, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	uintptr_t pte_idx = pte_index(va);
@@ -441,7 +441,7 @@ static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
 	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
 }
 
-static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
+static pmd_t *__meminit get_pmd_virt_late(phys_addr_t pa)
 {
 	return (pmd_t *) __va(pa);
 }
@@ -458,7 +458,7 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t __init alloc_pmd_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)
 {
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
 
@@ -466,9 +466,9 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
 	return __pa((pmd_t *)ptdesc_address(ptdesc));
 }
 
-static void __init create_pmd_mapping(pmd_t *pmdp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pmd_mapping(pmd_t *pmdp,
+					 uintptr_t va, phys_addr_t pa,
+					 phys_addr_t sz, pgprot_t prot)
 {
 	pte_t *ptep;
 	phys_addr_t pte_phys;
@@ -504,7 +504,7 @@ static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
 	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
 }
 
-static pud_t *__init get_pud_virt_late(phys_addr_t pa)
+static pud_t *__meminit get_pud_virt_late(phys_addr_t pa)
 {
 	return (pud_t *)__va(pa);
 }
@@ -522,7 +522,7 @@ static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_pud_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pud_late(uintptr_t va)
 {
 	unsigned long vaddr;
 
@@ -542,7 +542,7 @@ static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
 	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
 }
 
-static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
+static p4d_t *__meminit get_p4d_virt_late(phys_addr_t pa)
 {
 	return (p4d_t *)__va(pa);
 }
@@ -560,7 +560,7 @@ static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_p4d_late(uintptr_t va)
+static phys_addr_t __meminit alloc_p4d_late(uintptr_t va)
 {
 	unsigned long vaddr;
 
@@ -569,9 +569,8 @@ static phys_addr_t alloc_p4d_late(uintptr_t va)
 	return __pa(vaddr);
 }
 
-static void __init create_pud_mapping(pud_t *pudp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pud_mapping(pud_t *pudp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	pmd_t *nextp;
 	phys_addr_t next_phys;
@@ -596,9 +595,8 @@ static void __init create_pud_mapping(pud_t *pudp,
 	create_pmd_mapping(nextp, va, pa, sz, prot);
 }
 
-static void __init create_p4d_mapping(p4d_t *p4dp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_p4d_mapping(p4d_t *p4dp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	pud_t *nextp;
 	phys_addr_t next_phys;
@@ -654,9 +652,8 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
 #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-void __init create_pgd_mapping(pgd_t *pgdp,
-			       uintptr_t va, phys_addr_t pa,
-			       phys_addr_t sz, pgprot_t prot)
+void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+				  pgprot_t prot)
 {
 	pgd_next_t *nextp;
 	phys_addr_t next_phys;
@@ -681,8 +678,7 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 	create_pgd_next_mapping(nextp, va, pa, sz, prot);
 }
 
-static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
-				      phys_addr_t size)
+static uintptr_t __meminit best_map_size(phys_addr_t pa, uintptr_t va, phys_addr_t size)
 {
 	if (debug_pagealloc_enabled())
 		return PAGE_SIZE;
@@ -718,7 +714,7 @@ asmlinkage void __init __copy_data(void)
 #endif
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-static __init pgprot_t pgprot_from_va(uintptr_t va)
+static __meminit pgprot_t pgprot_from_va(uintptr_t va)
 {
 	if (is_va_kernel_text(va))
 		return PAGE_KERNEL_READ_EXEC;
@@ -743,7 +739,7 @@ void mark_rodata_ro(void)
 				  set_memory_ro);
 }
 #else
-static __init pgprot_t pgprot_from_va(uintptr_t va)
+static __meminit pgprot_t pgprot_from_va(uintptr_t va)
 {
 	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
 		return PAGE_KERNEL;
@@ -1235,9 +1231,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	pt_ops_set_fixmap();
 }
 
-static void __init create_linear_mapping_range(phys_addr_t start,
-					       phys_addr_t end,
-					       uintptr_t fixed_map_size)
+static void __meminit create_linear_mapping_range(phys_addr_t start, phys_addr_t end,
+						  uintptr_t fixed_map_size, const pgprot_t *pgprot)
 {
 	phys_addr_t pa;
 	uintptr_t va, map_size;
@@ -1248,7 +1243,7 @@ static void __init create_linear_mapping_range(phys_addr_t start,
 					    best_map_size(pa, va, end - pa);
 
 		create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
-				   pgprot_from_va(va));
+				   pgprot ? *pgprot : pgprot_from_va(va));
 	}
 }
@@ -1292,22 +1287,19 @@ static void __init create_linear_mapping_page_table(void)
 		if (end >= __pa(PAGE_OFFSET) + memory_limit)
 			end = __pa(PAGE_OFFSET) + memory_limit;
 
-		create_linear_mapping_range(start, end, 0);
+		create_linear_mapping_range(start, end, 0, NULL);
 	}
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
-	create_linear_mapping_range(krodata_start,
-				    krodata_start + krodata_size, 0);
+	create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0, NULL);
+	create_linear_mapping_range(krodata_start, krodata_start + krodata_size, 0, NULL);
 
 	memblock_clear_nomap(ktext_start, ktext_size);
 	memblock_clear_nomap(krodata_start, krodata_size);
 #endif
 
 #ifdef CONFIG_KFENCE
-	create_linear_mapping_range(kfence_pool,
-				    kfence_pool + KFENCE_POOL_SIZE,
-				    PAGE_SIZE);
+	create_linear_mapping_range(kfence_pool, kfence_pool + KFENCE_POOL_SIZE, PAGE_SIZE, NULL);
 
 	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
 #endif
@@ -1439,7 +1431,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	 * memory hotplug, we are not able to update all the page tables with
 	 * the new PMDs.
 	 */
-	return vmemmap_populate_hugepages(start, end, node, NULL);
+	return vmemmap_populate_hugepages(start, end, node, altmap);
 }
 #endif
@@ -1493,11 +1485,19 @@ failed:
 	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
 }
 
+#define PAGE_END KASAN_SHADOW_START
+
 void __init pgtable_cache_init(void)
 {
 	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
 	if (IS_ENABLED(CONFIG_MODULES))
 		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
+	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
+		preallocate_pgd_pages_range(VMEMMAP_START, VMEMMAP_END, "vmemmap");
+		preallocate_pgd_pages_range(PAGE_OFFSET, PAGE_END, "direct map");
+		if (IS_ENABLED(CONFIG_KASAN))
+			preallocate_pgd_pages_range(KASAN_SHADOW_START, KASAN_SHADOW_END, "kasan");
+	}
 }
 #endif
@@ -1534,3 +1534,270 @@ struct execmem_info __init *execmem_arch_setup(void)
 }
 #endif /* CONFIG_MMU */
 #endif /* CONFIG_EXECMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
+{
+	struct page *page = pmd_page(*pmd);
+	struct ptdesc *ptdesc = page_ptdesc(page);
+	pte_t *pte;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pte = pte_start + i;
+		if (!pte_none(*pte))
+			return;
+	}
+
+	pagetable_pte_dtor(ptdesc);
+	if (PageReserved(page))
+		free_reserved_page(page);
+	else
+		pagetable_free(ptdesc);
+	pmd_clear(pmd);
+}
+
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+{
+	struct page *page = pud_page(*pud);
+	struct ptdesc *ptdesc = page_ptdesc(page);
+	pmd_t *pmd;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd = pmd_start + i;
+		if (!pmd_none(*pmd))
+			return;
+	}
+
+	pagetable_pmd_dtor(ptdesc);
+	if (PageReserved(page))
+		free_reserved_page(page);
+	else
+		pagetable_free(ptdesc);
+	pud_clear(pud);
+}
+
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
+{
+	struct page *page = p4d_page(*p4d);
+	pud_t *pud;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++) {
+		pud = pud_start + i;
+		if (!pud_none(*pud))
+			return;
+	}
+
+	if (PageReserved(page))
+		free_reserved_page(page);
+	else
+		free_pages((unsigned long)page_address(page), 0);
+	p4d_clear(p4d);
+}
+
+static void __meminit free_vmemmap_storage(struct page *page, size_t size,
+					   struct vmem_altmap *altmap)
+{
+	int order = get_order(size);
+
+	if (altmap) {
+		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
+		return;
+	}
+
+	if (PageReserved(page)) {
+		unsigned int nr_pages = 1 << order;
+
+		while (nr_pages--)
+			free_reserved_page(page++);
+		return;
+	}
+
+	free_pages((unsigned long)page_address(page), order);
+}
+
+static void __meminit remove_pte_mapping(pte_t *pte_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	pte_t *ptep, pte;
+
+	for (; addr < end; addr = next) {
+		next = (addr + PAGE_SIZE) & PAGE_MASK;
+		if (next > end)
+			next = end;
+
+		ptep = pte_base + pte_index(addr);
+		pte = ptep_get(ptep);
+		if (!pte_present(pte))
+			continue;
+
+		pte_clear(&init_mm, addr, ptep);
+		if (is_vmemmap)
+			free_vmemmap_storage(pte_page(pte), PAGE_SIZE, altmap);
+	}
+}
+
+static void __meminit remove_pmd_mapping(pmd_t *pmd_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	pte_t *pte_base;
+	pmd_t *pmdp, pmd;
+
+	for (; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
+		pmdp = pmd_base + pmd_index(addr);
+		pmd = pmdp_get(pmdp);
+		if (!pmd_present(pmd))
+			continue;
+
+		if (pmd_leaf(pmd)) {
+			pmd_clear(pmdp);
+			if (is_vmemmap)
+				free_vmemmap_storage(pmd_page(pmd), PMD_SIZE, altmap);
+			continue;
+		}
+
+		pte_base = (pte_t *)pmd_page_vaddr(*pmdp);
+		remove_pte_mapping(pte_base, addr, next, is_vmemmap, altmap);
+		free_pte_table(pte_base, pmdp);
+	}
+}
+
+static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	pud_t *pudp, pud;
+	pmd_t *pmd_base;
+
+	for (; addr < end; addr = next) {
+		next = pud_addr_end(addr, end);
+		pudp = pud_base + pud_index(addr);
+		pud = pudp_get(pudp);
+		if (!pud_present(pud))
+			continue;
+
+		if (pud_leaf(pud)) {
+			if (pgtable_l4_enabled) {
+				pud_clear(pudp);
+				if (is_vmemmap)
+					free_vmemmap_storage(pud_page(pud), PUD_SIZE, altmap);
+			}
+			continue;
+		}
+
+		pmd_base = pmd_offset(pudp, 0);
+		remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);
+
+		if (pgtable_l4_enabled)
+			free_pmd_table(pmd_base, pudp);
+	}
+}
+
+static void __meminit remove_p4d_mapping(p4d_t *p4d_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	p4d_t *p4dp, p4d;
+	pud_t *pud_base;
+
+	for (; addr < end; addr = next) {
+		next = p4d_addr_end(addr, end);
+		p4dp = p4d_base + p4d_index(addr);
+		p4d = p4dp_get(p4dp);
+		if (!p4d_present(p4d))
+			continue;
+
+		if (p4d_leaf(p4d)) {
+			if (pgtable_l5_enabled) {
+				p4d_clear(p4dp);
+				if (is_vmemmap)
+					free_vmemmap_storage(p4d_page(p4d), P4D_SIZE, altmap);
+			}
+			continue;
+		}
+
+		pud_base = pud_offset(p4dp, 0);
+		remove_pud_mapping(pud_base, addr, next, is_vmemmap, altmap);
+
+		if (pgtable_l5_enabled)
+			free_pud_table(pud_base, p4dp);
+	}
+}
+
+static void __meminit remove_pgd_mapping(unsigned long va, unsigned long end, bool is_vmemmap,
+					 struct vmem_altmap *altmap)
+{
+	unsigned long addr, next;
+	p4d_t *p4d_base;
+	pgd_t *pgd;
+
+	for (addr = va; addr < end; addr = next) {
+		next = pgd_addr_end(addr, end);
+		pgd = pgd_offset_k(addr);
+
+		if (!pgd_present(*pgd))
+			continue;
+
+		if (pgd_leaf(*pgd))
+			continue;
+
+		p4d_base = p4d_offset(pgd, 0);
+		remove_p4d_mapping(p4d_base, addr, next, is_vmemmap, altmap);
+	}
+
+	flush_tlb_all();
+}
+
+static void __meminit remove_linear_mapping(phys_addr_t start, u64 size)
+{
+	unsigned long va = (unsigned long)__va(start);
+	unsigned long end = (unsigned long)__va(start + size);
+
+	remove_pgd_mapping(va, end, false, NULL);
+}
+
+struct range arch_get_mappable_range(void)
+{
+	struct range mhp_range;
+
+	mhp_range.start = __pa(PAGE_OFFSET);
+	mhp_range.end = __pa(PAGE_END - 1);
+	return mhp_range;
+}
+
+int __ref arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
+{
+	int ret = 0;
+
+	create_linear_mapping_range(start, start + size, 0, &params->pgprot);
+	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, params);
+	if (ret) {
+		remove_linear_mapping(start, size);
+		goto out;
+	}
+
+	max_pfn = PFN_UP(start + size);
+	max_low_pfn = max_pfn;
+
+out:
+	flush_tlb_all();
+	return ret;
+}
+
+void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+{
+	__remove_pages(start >> PAGE_SHIFT, size >> PAGE_SHIFT, altmap);
+	remove_linear_mapping(start, size);
+	flush_tlb_all();
+}
+
+void __ref vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
+{
+	remove_pgd_mapping(start, end, true, altmap);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */