The sparse_init() function calls vmemmap_populate() many times to create VA-to-PA mappings for the VMEMMAP area, where all "struct page" instances are located once CONFIG_SPARSEMEM_VMEMMAP is defined. These "struct page" instances are later initialized in zone_sizes_init(). However, during this process no sfence.vma instruction is executed for the VMEMMAP area. This omission may cause the hart to fail its page table walks because some data related to the address translation is invisible to the hart. To solve this issue, call local_flush_tlb_kernel_range() right after sparse_init() to execute an sfence.vma instruction for the VMEMMAP area, ensuring that all data related to the address translation is visible to the hart.

Fixes: d95f1a542c ("RISC-V: Implement sparsemem")
Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240117140333.2479667-1-vincent.chen@sifive.com
Fixes: 7a92fc8b4d ("mm: Introduce flush_cache_vmap_early()")
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
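A minimal sketch of the shape of the fix described above, assuming it lands in a boot-time helper such as misc_mem_init() in arch/riscv/mm/init.c; the surrounding function name and the use of VMEMMAP_START/VMEMMAP_END as the flush bounds are illustrative, not taken from this commit message:

static void __init misc_mem_init(void)
{
	sparse_init();
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/*
	 * sparse_init() populated the VMEMMAP page tables via
	 * vmemmap_populate(); execute sfence.vma for that range so the
	 * hart's page table walks see the new mappings before
	 * zone_sizes_init() initializes the struct pages there.
	 */
	local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
#endif
	zone_sizes_init();
}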
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_TLBFLUSH_H
#define _ASM_RISCV_TLBFLUSH_H

#include <linux/mm_types.h>
#include <asm/smp.h>
#include <asm/errata_list.h>

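/*
 * Sentinel values: passing FLUSH_TLB_MAX_SIZE as a flush size requests a
 * full flush, and FLUSH_TLB_NO_ASID means the flush is not restricted to
 * a single address space ID.
 */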
#define FLUSH_TLB_MAX_SIZE	((unsigned long)-1)
#define FLUSH_TLB_NO_ASID	((unsigned long)-1)

#ifdef CONFIG_MMU
extern unsigned long asid_mask;

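/* Flush the entire local TLB */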
static inline void local_flush_tlb_all(void)
{
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
#else /* CONFIG_MMU */
#define local_flush_tlb_all()			do { } while (0)
#define local_flush_tlb_page(addr)		do { } while (0)
#endif /* CONFIG_MMU */

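/*
 * With SMP and the MMU enabled, the flush_tlb_*() operations below are
 * implemented in arch/riscv/mm/tlbflush.c and may broadcast the flush to
 * other harts.
 */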
#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned int page_size);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end);
#endif

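/*
 * Batched TLB flushing: pending flushes are queued while pages are being
 * unmapped and issued in one go via arch_tlbbatch_flush().
 */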
bool arch_tlbbatch_should_defer(struct mm_struct *mm);
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr);
void arch_flush_tlb_batched_pending(struct mm_struct *mm);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#else /* CONFIG_SMP && CONFIG_MMU */

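/* Without SMP, fall back to flushing the local hart's TLB. */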
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)

static inline void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	local_flush_tlb_all();
}

/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
{
	local_flush_tlb_all();
}

#define flush_tlb_mm(mm) flush_tlb_all()
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */

#endif /* _ASM_RISCV_TLBFLUSH_H */