Even though tlb_flush() does a flush that invalidates all caches, we
can end up doing an RCU page table free before tlb_flush() is called.
That means page walk cache (PWC) entries can survive after the page
table pages themselves have been freed, which can result in a bogus
page table walk.

Avoid this by doing a PWC flush on every page table free. We can't
batch the PWC flush, because the RCU callback in which the page table
pages are freed has no access to the mmu_gather. Thus we have to do a
PWC flush for every page table page freed.

Note: I also removed the dummy tlb_flush_pgtable() functions for
32-bit hash.
Fixes: 1a472c9dba ("powerpc/mm/radix: Add tlbflush routines")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
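
For context, the constraint described above means the PWC flush must
happen while the mmu_gather is still in hand, before the page table
page is queued for RCU freeing; by the time the RCU callback runs,
only the rcu_head is available. Below is a minimal sketch of that
ordering, not the actual kernel diff: the names ending in _sketch and
the struct layout are hypothetical, while flush_tlb_pgtable() is the
helper defined in the header shown further down.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/tlb.h>

/* Hypothetical container for a page table page pending RCU free. */
struct pt_page_sketch {
	struct rcu_head rcu;
	void *table;
};

static void pt_free_rcu_sketch(struct rcu_head *head)
{
	struct pt_page_sketch *pt = container_of(head, struct pt_page_sketch, rcu);

	/*
	 * Runs later, in softirq context: no mmu_gather exists here,
	 * so a batched PWC flush is impossible at this point.
	 */
	kfree(pt->table);
	kfree(pt);
}

static void pgtable_free_tlb_sketch(struct mmu_gather *tlb,
				    struct pt_page_sketch *pt,
				    unsigned long address)
{
	/* Flush the page walk cache now, while the mmu_gather is in hand. */
	flush_tlb_pgtable(tlb, address);
	call_rcu(&pt->rcu, pt_free_rcu_sketch);
}

The file below is the Book3S-64 TLB flush header; judging by its
include guard, it is arch/powerpc/include/asm/book3s/64/tlbflush.h.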
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

/*
 * flush the page walk cache for the address
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. We already
	 * have marked the upper/higher level page table entry none by now.
	 * So it is safe to flush PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
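
As a usage note, generic MM code only ever calls the un-prefixed
wrappers above; each wrapper dispatches once per call via
radix_enabled() to the radix__ or hash__ implementation. A
hypothetical caller sketch (unmap_range_sketch is not a kernel
function, just an illustration of the dispatch):

static void unmap_range_sketch(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	/* PTEs for [start, end) are assumed to have been cleared already. */
	flush_tlb_range(vma, start, end);	/* radix__ or hash__ variant */
}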