powerpc: implement the new page table range API
Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().  Change
the PG_arch_1 (aka PG_dcache_dirty) flag from being per-page to per-folio.

[willy@infradead.org: re-export flush_dcache_icache_folio()]
  Link: https://lkml.kernel.org/r/ZMx1daYwvD9EM7Cv@casper.infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Committed by: Andrew Morton
Parent: e70bbca607
Commit: 9fee28baa6
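The commit message covers three additions: set_ptes() installs page table
entries for a run of consecutive pages in one call, update_mmu_cache_range()
is the multi-page counterpart of update_mmu_cache(), and flush_dcache_folio()
works on whole folios, with PG_arch_1 (PG_dcache_dirty) tracked per folio
rather than per page.  Below is a minimal, hedged sketch of the set_ptes()
pattern only; it assumes the PTE_RPN_SHIFT definition added by the first hunk
of the diff, and the helper name and exact stepping are illustrative rather
than the literal powerpc implementation:

/*
 * Hedged sketch of the set_ptes() pattern described above: write the
 * first PTE, then step the physical page number once per page for the
 * remaining entries.  PTE_RPN_SHIFT (added in the first hunk below)
 * marks where the page frame number sits in the PTE, so advancing to
 * the next frame means adding 1UL << PTE_RPN_SHIFT to the raw value.
 * Names and details are illustrative, not the exact upstream code.
 */
static inline void set_ptes_sketch(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte_at(mm, addr, ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
		/* advance the PTE to the next page frame */
		pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
	}
}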
@@ -104,6 +104,7 @@
  * and every thing below PAGE_SHIFT;
  */
 #define PTE_RPN_MASK (((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
+#define PTE_RPN_SHIFT PAGE_SHIFT
 /*
  * set of bits not changed in pmd_modify. Even though we have hash specific bits
  * in here, on radix we expect them to be zero.
@@ -569,11 +570,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
-}
-
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
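The second hunk removes this header's private pte_pfn(); once PTE_RPN_SHIFT
exists (first hunk), a single shared helper can recover the page frame number
for the different powerpc MMU variants.  A sketch of what such a consolidated
helper looks like follows; the override-macro placement is an assumption for
illustration:

/*
 * Sketch of a shared pte_pfn(): mask the PTE down to its real page
 * number field and shift it into a PFN.  Illustrative; see the commit
 * itself for the exact definition and its location.
 */
#define pte_pfn pte_pfn
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}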