mm: add gpu active/reclaim per-node stat counters (v2)

While discussing memcg integration with gpu memory allocations,
it was pointed out that there were no numa/system counters for
GPU memory allocations.

With more integrated-memory GPU server systems turning up, and
more requirements for memory tracking, it seems we should start
closing the gap.

Add two counters to track GPU per-node system memory allocations.

The first tracks memory currently allocated to GPU objects, and the
second tracks memory stored in GPU page pools that can be reclaimed
by the shrinker.

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit is contained in:
Dave Airlie
2026-02-24 12:06:18 +10:00
parent 322e4116ac
commit 2232ba9c79
6 changed files with 28 additions and 1 deletions

View File

@@ -1089,6 +1089,8 @@ Example output. You may not have all of these fields.
CmaFree: 0 kB
Unaccepted: 0 kB
Balloon: 0 kB
GPUActive: 0 kB
GPUReclaim: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
@@ -1269,6 +1271,12 @@ Unaccepted
Memory that has not been accepted by the guest
Balloon
Memory returned to Host by VM Balloon Drivers
GPUActive
System memory allocated to active GPU objects
GPUReclaim
System memory stored in GPU pools for reuse. This memory is not
counted in GPUActive. It is shrinker reclaimable memory kept in a reuse
pool because it has non-standard page table attributes, like WC or UC.
HugePages_Total, HugePages_Free, HugePages_Rsvd, HugePages_Surp, Hugepagesize, Hugetlb
See Documentation/admin-guide/mm/hugetlbpage.rst.
DirectMap4k, DirectMap2M, DirectMap1G

View File

@@ -523,6 +523,8 @@ static ssize_t node_read_meminfo(struct device *dev,
#ifdef CONFIG_UNACCEPTED_MEMORY
"Node %d Unaccepted: %8lu kB\n"
#endif
"Node %d GPUActive: %8lu kB\n"
"Node %d GPUReclaim: %8lu kB\n"
,
nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
nid, K(node_page_state(pgdat, NR_WRITEBACK)),
@@ -556,6 +558,9 @@ static ssize_t node_read_meminfo(struct device *dev,
,
nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED))
#endif
,
nid, K(node_page_state(pgdat, NR_GPU_ACTIVE)),
nid, K(node_page_state(pgdat, NR_GPU_RECLAIM))
);
len += hugetlb_report_node_meminfo(buf, len, nid);
return len;

View File

@@ -163,6 +163,12 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "Balloon: ",
global_node_page_state(NR_BALLOON_PAGES));
show_val_kb(m, "GPUActive: ",
global_node_page_state(NR_GPU_ACTIVE));
show_val_kb(m, "GPUReclaim: ",
global_node_page_state(NR_GPU_RECLAIM));
hugetlb_report_meminfo(m);
arch_report_meminfo(m);

View File

@@ -260,6 +260,8 @@ enum node_stat_item {
#endif
NR_BALLOON_PAGES,
NR_KERNEL_FILE_PAGES,
NR_GPU_ACTIVE, /* Pages assigned to GPU objects */
NR_GPU_RECLAIM, /* Pages in shrinkable GPU pools */
NR_VM_NODE_STAT_ITEMS
};

View File

@@ -254,6 +254,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
" sec_pagetables:%lukB"
" all_unreclaimable? %s"
" Balloon:%lukB"
" gpu_active:%lukB"
" gpu_reclaim:%lukB"
"\n",
pgdat->node_id,
K(node_page_state(pgdat, NR_ACTIVE_ANON)),
@@ -279,7 +281,9 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
K(node_page_state(pgdat, NR_PAGETABLE)),
K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
str_yes_no(kswapd_test_hopeless(pgdat)),
K(node_page_state(pgdat, NR_BALLOON_PAGES)));
K(node_page_state(pgdat, NR_BALLOON_PAGES)),
K(node_page_state(pgdat, NR_GPU_ACTIVE)),
K(node_page_state(pgdat, NR_GPU_RECLAIM)));
}
for_each_populated_zone(zone) {

View File

@@ -1281,6 +1281,8 @@ const char * const vmstat_text[] = {
#endif
[I(NR_BALLOON_PAGES)] = "nr_balloon_pages",
[I(NR_KERNEL_FILE_PAGES)] = "nr_kernel_file_pages",
[I(NR_GPU_ACTIVE)] = "nr_gpu_active",
[I(NR_GPU_RECLAIM)] = "nr_gpu_reclaim",
#undef I
/* system-wide enum vm_stat_item counters */