diff --git a/mm/zswap.c b/mm/zswap.c
index 16b2ef7223e1..0823cadd02b6 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1595,11 +1595,11 @@ int zswap_load(struct folio *folio)
 {
 	swp_entry_t swp = folio->swap;
 	pgoff_t offset = swp_offset(swp);
-	bool swapcache = folio_test_swapcache(folio);
 	struct xarray *tree = swap_zswap_tree(swp);
 	struct zswap_entry *entry;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
+	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
 
 	if (zswap_never_enabled())
 		return -ENOENT;
@@ -1630,22 +1630,15 @@ int zswap_load(struct folio *folio)
 		count_objcg_events(entry->objcg, ZSWPIN, 1);
 
 	/*
-	 * When reading into the swapcache, invalidate our entry. The
-	 * swapcache can be the authoritative owner of the page and
+	 * We are reading into the swapcache; invalidate the zswap entry.
+	 * The swapcache is the authoritative owner of the page and
 	 * its mappings, and the pressure that results from having two
 	 * in-memory copies outweighs any benefits of caching the
 	 * compression work.
-	 *
-	 * (Most swapins go through the swapcache. The notable
-	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
-	 * files, which reads into a private page and may free it if
-	 * the fault fails. We remain the primary owner of the entry.)
 	 */
-	if (swapcache) {
-		folio_mark_dirty(folio);
-		xa_erase(tree, offset);
-		zswap_entry_free(entry);
-	}
+	folio_mark_dirty(folio);
+	xa_erase(tree, offset);
+	zswap_entry_free(entry);
 
 	folio_unlock(folio);
 	return 0;
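
For reference, a minimal C sketch of the zswap_load() control flow as it stands after this patch. This is not the actual kernel source: decompression, error handling, and the objcg accounting visible in the hunk above are elided, and zswap_load_sketch is a hypothetical name used only for illustration. It is meant to show the net effect of the change, namely that the swapcache test has become an invariant and the invalidation is now unconditional.

/*
 * Sketch of the post-patch load path (simplified, not kernel source).
 */
static int zswap_load_sketch(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	/* Every swapin now goes through the swapcache, so assert it. */
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	entry = xa_load(tree, offset);
	if (!entry)
		return -ENOENT;

	/* ... decompress the entry into the folio ... */

	/*
	 * The swapcache folio is the authoritative owner of the page;
	 * unconditionally drop the compressed copy rather than keep two
	 * in-memory copies alive.
	 */
	folio_mark_dirty(folio);
	xa_erase(tree, offset);
	zswap_entry_free(entry);

	folio_unlock(folio);
	return 0;
}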