mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
memcg: consolidate private id refcount get/put helpers
We currently have two different sets of helpers for getting or putting the private IDs' refcount for order 0 and large folios. This is redundant. Just use one and always acquire the refcount of the swapout folio size unless it's zero, and put the refcount using the folio size if the charge failed, since the folio size can't change. Then there is no need to update the refcount for tail pages. The same applies to freeing, so only one pair of get/put helpers is needed now. The performance might be slightly better, too: both "inc unless zero" and "add unless zero" use the same cmpxchg implementation. For large folios, we saved an atomic operation. And for both order 0 and large folios, we saved a branch. Link: https://lkml.kernel.org/r/20260213-memcg-privid-v1-1-d8cb7afcf831@tencent.com Signed-off-by: Kairui Song <kasong@tencent.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Chen Ridong <chenridong@huaweicloud.com> Acked-by: Shakeel Butt <shakeel.butt@gmail.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Muchun Song <muchun.song@linux.dev> Cc: Roman Gushchin <roman.gushchin@linux.dev> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
c9cb94c6b8
commit
37cb8cd043
@@ -635,11 +635,8 @@ void memcg1_swapout(struct folio *folio, swp_entry_t entry)
|
||||
* have an ID allocated to it anymore, charge the closest online
|
||||
* ancestor for the swap instead and transfer the memory+swap charge.
|
||||
*/
|
||||
swap_memcg = mem_cgroup_private_id_get_online(memcg);
|
||||
nr_entries = folio_nr_pages(folio);
|
||||
/* Get references for the tail pages, too */
|
||||
if (nr_entries > 1)
|
||||
mem_cgroup_private_id_get_many(swap_memcg, nr_entries - 1);
|
||||
swap_memcg = mem_cgroup_private_id_get_online(memcg, nr_entries);
|
||||
mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
|
||||
|
||||
swap_cgroup_record(folio, mem_cgroup_private_id(swap_memcg), entry);
|
||||
|
||||
@@ -27,8 +27,8 @@ void drain_all_stock(struct mem_cgroup *root_memcg);
|
||||
unsigned long memcg_events(struct mem_cgroup *memcg, int event);
|
||||
int memory_stat_show(struct seq_file *m, void *v);
|
||||
|
||||
void mem_cgroup_private_id_get_many(struct mem_cgroup *memcg, unsigned int n);
|
||||
struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg);
|
||||
struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg,
|
||||
unsigned int n);
|
||||
|
||||
/* Cgroup v1-specific declarations */
|
||||
#ifdef CONFIG_MEMCG_V1
|
||||
|
||||
@@ -3634,13 +3634,7 @@ static void mem_cgroup_private_id_remove(struct mem_cgroup *memcg)
|
||||
}
|
||||
}
|
||||
|
||||
void __maybe_unused mem_cgroup_private_id_get_many(struct mem_cgroup *memcg,
|
||||
unsigned int n)
|
||||
{
|
||||
refcount_add(n, &memcg->id.ref);
|
||||
}
|
||||
|
||||
static void mem_cgroup_private_id_put_many(struct mem_cgroup *memcg, unsigned int n)
|
||||
static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg, unsigned int n)
|
||||
{
|
||||
if (refcount_sub_and_test(n, &memcg->id.ref)) {
|
||||
mem_cgroup_private_id_remove(memcg);
|
||||
@@ -3650,14 +3644,9 @@ static void mem_cgroup_private_id_put_many(struct mem_cgroup *memcg, unsigned in
|
||||
}
|
||||
}
|
||||
|
||||
static inline void mem_cgroup_private_id_put(struct mem_cgroup *memcg)
|
||||
struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg, unsigned int n)
|
||||
{
|
||||
mem_cgroup_private_id_put_many(memcg, 1);
|
||||
}
|
||||
|
||||
struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg)
|
||||
{
|
||||
while (!refcount_inc_not_zero(&memcg->id.ref)) {
|
||||
while (!refcount_add_not_zero(n, &memcg->id.ref)) {
|
||||
/*
|
||||
* The root cgroup cannot be destroyed, so it's refcount must
|
||||
* always be >= 1.
|
||||
@@ -3957,7 +3946,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
|
||||
|
||||
drain_all_stock(memcg);
|
||||
|
||||
mem_cgroup_private_id_put(memcg);
|
||||
mem_cgroup_private_id_put(memcg, 1);
|
||||
}
|
||||
|
||||
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
|
||||
@@ -5247,19 +5236,15 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
|
||||
return 0;
|
||||
}
|
||||
|
||||
memcg = mem_cgroup_private_id_get_online(memcg);
|
||||
memcg = mem_cgroup_private_id_get_online(memcg, nr_pages);
|
||||
|
||||
if (!mem_cgroup_is_root(memcg) &&
|
||||
!page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
|
||||
memcg_memory_event(memcg, MEMCG_SWAP_MAX);
|
||||
memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
|
||||
mem_cgroup_private_id_put(memcg);
|
||||
mem_cgroup_private_id_put(memcg, nr_pages);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Get references for the tail pages, too */
|
||||
if (nr_pages > 1)
|
||||
mem_cgroup_private_id_get_many(memcg, nr_pages - 1);
|
||||
mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
|
||||
|
||||
swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
|
||||
@@ -5288,7 +5273,7 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
|
||||
page_counter_uncharge(&memcg->swap, nr_pages);
|
||||
}
|
||||
mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
|
||||
mem_cgroup_private_id_put_many(memcg, nr_pages);
|
||||
mem_cgroup_private_id_put(memcg, nr_pages);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user