mm: centralize+fix comments about compound_mapcount() in new sync_with_folio_pmd_zap()

We still mention compound_mapcount() in two comments, even though that
function no longer exists.

Instead of simply updating both places to refer to the folio mapcount,
let's factor out the odd-looking PTL sync into sync_with_folio_pmd_zap()
and add centralized documentation explaining why it is required.

[akpm@linux-foundation.org: update comment per Matthew and David]
Link: https://lkml.kernel.org/r/20260223163920.287720-1-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

mm/internal.h

@@ -516,6 +516,25 @@ void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
+/**
+ * sync_with_folio_pmd_zap - sync with concurrent zapping of a folio PMD
+ * @mm: The mm_struct.
+ * @pmdp: Pointer to the pmd that was found to be pmd_none().
+ *
+ * When we find a pmd_none() while unmapping a folio without holding the PTL,
+ * zap_huge_pmd() may have cleared the PMD but not yet modified the folio to
+ * indicate that it's unmapped. Skipping the PMD without synchronization could
+ * make folio unmapping code assume that unmapping failed.
+ *
+ * Wait for concurrent zapping to complete by grabbing the PTL.
+ */
+static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
+{
+	spinlock_t *ptl = pmd_lock(mm, pmdp);
+
+	spin_unlock(ptl);
+}
+
 struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
 		      struct vm_area_struct *vma,
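
The window the kernel-doc describes is easiest to see as an interleaving.
The sketch below is illustrative, not part of the patch; "mark folio
unmapped" stands in for the rmap/mapcount update that zap_huge_pmd()
performs before releasing the PTL:

	/*
	 * CPU 0: zap_huge_pmd()            CPU 1: walker without the PTL
	 *
	 * spin_lock(ptl);
	 * clear *pmd
	 *                                   observes pmd_none(*pmd), but the
	 *                                   folio still looks mapped
	 *                                   sync_with_folio_pmd_zap(mm, pmd);
	 *                                     -> blocks in pmd_lock()
	 * mark folio unmapped
	 * spin_unlock(ptl);
	 *                                     -> PTL acquired and released;
	 *                                        the folio state now matches
	 *                                        the cleared PMD
	 */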

mm/memory.c

@@ -1993,13 +1993,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		} else if (details && details->single_folio &&
 			   folio_test_pmd_mappable(details->single_folio) &&
 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
-			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
-			/*
-			 * Take and drop THP pmd lock so that we cannot return
-			 * prematurely, while zap_huge_pmd() has cleared *pmd,
-			 * but not yet decremented compound_mapcount().
-			 */
-			spin_unlock(ptl);
+			sync_with_folio_pmd_zap(tlb->mm, pmd);
 		}
 		if (pmd_none(*pmd)) {
 			addr = next;
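
The correctness of the empty lock/unlock pair relies only on the zapper
clearing the PMD and updating the folio under one and the same PTL. A
userspace analogue, as a minimal sketch (nothing below is kernel code;
all names are invented; build with cc -pthread):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* stands in for the PMD's PTL */
	static atomic_int pmd_present = 1;  /* stands in for *pmd */
	static atomic_int folio_mapped = 1; /* stands in for the folio's mapping state */

	static void *zapper(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&ptl);
		atomic_store(&pmd_present, 0);  /* PMD cleared first... */
		atomic_store(&folio_mapped, 0); /* ...folio marked unmapped later */
		pthread_mutex_unlock(&ptl);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, zapper, NULL);

		/* Walker: found "pmd_none()" without holding the lock. */
		while (atomic_load(&pmd_present))
			;

		/* Empty lock/unlock pair: wait out a zap still in flight. */
		pthread_mutex_lock(&ptl);
		pthread_mutex_unlock(&ptl);

		/* Guaranteed 0 here; without the pair above it could read 1. */
		printf("folio_mapped = %d\n", atomic_load(&folio_mapped));
		pthread_join(t, NULL);
		return 0;
	}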

mm/page_vma_mapped.c

@@ -269,11 +269,6 @@ restart:
 			spin_unlock(pvmw->ptl);
 			pvmw->ptl = NULL;
 		} else if (!pmd_present(pmde)) {
-			/*
-			 * If PVMW_SYNC, take and drop THP pmd lock so that we
-			 * cannot return prematurely, while zap_huge_pmd() has
-			 * cleared *pmd but not decremented compound_mapcount().
-			 */
 			const softleaf_t entry = softleaf_from_pmd(pmde);
 
 			if (softleaf_is_device_private(entry)) {
@@ -284,11 +279,9 @@ restart:
 			if ((pvmw->flags & PVMW_SYNC) &&
 			    thp_vma_suitable_order(vma, pvmw->address,
 						   PMD_ORDER) &&
-			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
-				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
-
-				spin_unlock(ptl);
-			}
+			    (pvmw->nr_pages >= HPAGE_PMD_NR))
+				sync_with_folio_pmd_zap(mm, pvmw->pmd);
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}