mm/memory: remove "zap_details" parameter from zap_page_range_single()
Nobody except mm/memory.c should really set that parameter to non-NULL. So
let's just drop it and make unmap_mapping_range_vma() use
zap_page_range_single_batched() instead.

[david@kernel.org: format on a single line]
  Link: https://lkml.kernel.org/r/8a27e9ac-2025-4724-a46d-0a7c90894ba7@kernel.org
Link: https://lkml.kernel.org/r/20260227200848.114019-3-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Puranjay Mohan <puranjay@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit de008c9ba5
parent c48ad5a4b8
committed by Andrew Morton
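For out-of-tree callers tracking this change, the conversion is mechanical: drop the trailing NULL argument. A minimal sketch of the call-site update follows; the caller function and its parameter names are hypothetical, not part of this patch:

	/* Hypothetical caller, for illustration of the API change only. */
	#include <linux/mm.h>

	static void example_zap(struct vm_area_struct *vma,
				unsigned long addr, unsigned long len)
	{
		/* Before this patch: zap_page_range_single(vma, addr, len, NULL); */
		zap_page_range_single(vma, addr, len);
	}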
arch/s390/mm/gmap_helpers.c
@@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo
 		if (!vma)
 			return;
 		if (!is_vm_hugetlb_page(vma))
-			zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr, NULL);
+			zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
 		vmaddr = vma->vm_end;
 	}
 }
drivers/android/binder_alloc.c
@@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
+		zap_page_range_single(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
 	}
include/linux/mm.h
@@ -2804,11 +2804,10 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);
 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
-			   unsigned long size, struct zap_details *details);
+			   unsigned long size);
 static inline void zap_vma_pages(struct vm_area_struct *vma)
 {
-	zap_page_range_single(vma, vma->vm_start,
-			      vma->vm_end - vma->vm_start, NULL);
+	zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 }
 struct mmu_notifier_range;
kernel/bpf/arena.c
@@ -656,8 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
 	guard(mutex)(&arena->lock);
 	/* iterate link list under lock */
 	list_for_each_entry(vml, &arena->vma_list, head)
-		zap_page_range_single(vml->vma, uaddr,
-				      PAGE_SIZE * page_cnt, NULL);
+		zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
 }
 
 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
kernel/events/core.c
@@ -7213,7 +7213,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
 #ifdef CONFIG_MMU
 	/* Clear any partial mappings on error. */
 	if (err)
-		zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL);
+		zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
 #endif
 
 	return err;
mm/madvise.c
@@ -1193,8 +1193,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
 		 * OK some of the range have non-guard pages mapped, zap
 		 * them. This leaves existing guard pages in place.
 		 */
-		zap_page_range_single(vma, range->start,
-				      range->end - range->start, NULL);
+		zap_page_range_single(vma, range->start, range->end - range->start);
 	}
 
 	/*
mm/memory.c
@@ -2203,17 +2203,16 @@ void zap_page_range_single_batched(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of shared cache invalidation
  *
  * The range must fit into one VMA.
  */
 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
-		unsigned long size, struct zap_details *details)
+		unsigned long size)
 {
 	struct mmu_gather tlb;
 
 	tlb_gather_mmu(&tlb, vma->vm_mm);
-	zap_page_range_single_batched(&tlb, vma, address, size, details);
+	zap_page_range_single_batched(&tlb, vma, address, size, NULL);
 	tlb_finish_mmu(&tlb);
 }
@@ -2235,7 +2234,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 	    !(vma->vm_flags & VM_PFNMAP))
 		return;
 
-	zap_page_range_single(vma, address, size, NULL);
+	zap_page_range_single(vma, address, size);
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
@@ -3003,7 +3002,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add
 	 * maintain page reference counts, and callers may free
 	 * pages due to the error. So zap it early.
 	 */
-	zap_page_range_single(vma, addr, size, NULL);
+	zap_page_range_single(vma, addr, size);
 	return error;
 }
@@ -4226,7 +4225,12 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
 		unsigned long start_addr, unsigned long end_addr,
 		struct zap_details *details)
 {
-	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
+	struct mmu_gather tlb;
+
+	tlb_gather_mmu(&tlb, vma->vm_mm);
+	zap_page_range_single_batched(&tlb, vma, start_addr,
+				      end_addr - start_addr, details);
+	tlb_finish_mmu(&tlb);
 }
 
 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
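Any future caller that genuinely needs to pass zap_details would follow the same open-coded pattern that unmap_mapping_range_vma() adopts above. A minimal sketch, with a hypothetical function name:

	/* Hypothetical details-passing caller, mirroring the
	 * unmap_mapping_range_vma() pattern above.
	 */
	static void example_zap_with_details(struct vm_area_struct *vma,
					     unsigned long addr, unsigned long size,
					     struct zap_details *details)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, vma->vm_mm);
		zap_page_range_single_batched(&tlb, vma, addr, size, details);
		tlb_finish_mmu(&tlb);
	}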
net/ipv4/tcp.c
@@ -2105,7 +2105,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
 		maybe_zap_len = total_bytes_to_map -	/* All bytes to map */
 				*length +		/* Mapped or pending */
 				(pages_remaining * PAGE_SIZE); /* Failed map. */
-		zap_page_range_single(vma, *address, maybe_zap_len, NULL);
+		zap_page_range_single(vma, *address, maybe_zap_len);
 		err = 0;
 	}
@@ -2270,8 +2270,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 	total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
 	if (total_bytes_to_map) {
 		if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
-			zap_page_range_single(vma, address, total_bytes_to_map,
-					      NULL);
+			zap_page_range_single(vma, address, total_bytes_to_map);
 		zc->length = total_bytes_to_map;
 		zc->recv_skip_hint = 0;
 	} else {
rust/kernel/mm/virt.rs
@@ -123,9 +123,7 @@ impl VmaRef {
         // SAFETY: By the type invariants, the caller has read access to this VMA, which is
         // sufficient for this method call. This method has no requirements on the vma flags. The
         // address range is checked to be within the vma.
-        unsafe {
-            bindings::zap_page_range_single(self.as_ptr(), address, size, core::ptr::null_mut())
-        };
+        unsafe { bindings::zap_page_range_single(self.as_ptr(), address, size) };
     }
 
     /// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise