ttm/pool: port to list_lru. (v2)
This is an initial port of the TTM pools for write-combined and uncached pages to use the list_lru. This makes the pools more NUMA aware and avoids needing separate NUMA pools (a later commit enables this).

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
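For readers new to the API: a list_lru keeps one internally locked list per NUMA node, which is what lets a single pool become NUMA aware without maintaining separate per-node pools. Below is a minimal sketch of the add/take pattern this commit adopts; the demo_* names are hypothetical, and the three-argument walk-callback signature (no spinlock argument) matches the kernel generation this diff targets.

#include <linux/list_lru.h>
#include <linux/rcupdate.h>

/* Hypothetical item type: anything that embeds a list_head works. */
struct demo_item {
	struct list_head lru;
};

static struct list_lru demo_lru;	/* one internal list per NUMA node */

/* Walk callback: isolate the current item and hand it back via cb_arg. */
static enum lru_status demo_take_one(struct list_head *item,
				     struct list_lru_one *list,
				     void *cb_arg)
{
	struct demo_item **out = cb_arg;

	list_lru_isolate(list, item);	/* unlink under the per-node lock */
	*out = container_of(item, struct demo_item, lru);
	return LRU_REMOVED;
}

/* Add an item on the given node's list (NULL memcg: not memcg aware). */
static void demo_give(struct demo_item *item, int nid)
{
	INIT_LIST_HEAD(&item->lru);
	rcu_read_lock();
	list_lru_add(&demo_lru, &item->lru, nid, NULL);
	rcu_read_unlock();
}

/* Remove and return one item from the given node, or NULL if it is empty. */
static struct demo_item *demo_take(int nid)
{
	struct demo_item *item = NULL;
	unsigned long nr_to_walk = 1;	/* stop after isolating one item */

	list_lru_walk_node(&demo_lru, nid, demo_take_one, &item, &nr_to_walk);
	return item;
}

static int demo_init(void)
{
	return list_lru_init(&demo_lru);
}

ttm_pool_type_take() in the diff below is exactly this take-one shape, with nr_to_walk = 1 bounding the walk to a single page on the requested node.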
drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -176,7 +176,7 @@ static void ttm_device_init_pools(struct kunit *test)
 			if (ttm_pool_uses_dma_alloc(pool))
 				KUNIT_ASSERT_FALSE(test,
-						   list_empty(&pt.pages));
+						   !list_lru_count(&pt.pages));
 		}
 	}
 }
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -248,7 +248,7 @@ static void ttm_pool_alloc_order_caching_match(struct kunit *test)
 	pool = ttm_pool_pre_populated(test, size, caching);
 
 	pt = &pool->caching[caching].orders[order];
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
 	tt = ttm_tt_kunit_init(test, 0, caching, size);
 	KUNIT_ASSERT_NOT_NULL(test, tt);
@@ -256,7 +256,7 @@ static void ttm_pool_alloc_order_caching_match(struct kunit *test)
 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
 	KUNIT_ASSERT_EQ(test, err, 0);
 
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
@@ -282,8 +282,8 @@ static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
 	tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
 	KUNIT_ASSERT_NOT_NULL(test, tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
 
 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
 	KUNIT_ASSERT_EQ(test, err, 0);
@@ -291,8 +291,8 @@ static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
 
 	ttm_pool_fini(pool);
 }
@@ -316,8 +316,8 @@ static void ttm_pool_alloc_order_mismatch(struct kunit *test)
 	tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
 	KUNIT_ASSERT_NOT_NULL(test, tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
 
 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
 	KUNIT_ASSERT_EQ(test, err, 0);
@@ -325,8 +325,8 @@ static void ttm_pool_alloc_order_mismatch(struct kunit *test)
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
 
 	ttm_pool_fini(pool);
 }
@@ -352,12 +352,12 @@ static void ttm_pool_free_dma_alloc(struct kunit *test)
 	ttm_pool_alloc(pool, tt, &simple_ctx);
 
 	pt = &pool->caching[caching].orders[order];
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_fini(pool);
 }
@@ -383,12 +383,12 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test)
 	ttm_pool_alloc(pool, tt, &simple_ctx);
 
 	pt = &pool->caching[caching].orders[order];
-	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
 
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
 
 	ttm_pool_fini(pool);
 }
@@ -404,11 +404,11 @@ static void ttm_pool_fini_basic(struct kunit *test)
 	pool = ttm_pool_pre_populated(test, size, caching);
 	pt = &pool->caching[caching].orders[order];
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_fini(pool);
 
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 }
 
 static struct kunit_case ttm_pool_test_cases[] = {
drivers/gpu/drm/ttm/ttm_pool.c
@@ -132,6 +132,16 @@ static struct list_head shrinker_list;
 static struct shrinker *mm_shrinker;
 static DECLARE_RWSEM(pool_shrink_rwsem);
 
+static int ttm_pool_nid(struct ttm_pool *pool)
+{
+	int nid = NUMA_NO_NODE;
+	if (pool)
+		nid = pool->nid;
+	if (nid == NUMA_NO_NODE)
+		nid = numa_node_id();
+	return nid;
+}
+
 /* Allocate pages of size 1 << order with the given gfp_flags */
 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 					unsigned int order)
@@ -297,30 +307,41 @@ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
 			clear_page(page_address(p + i));
 	}
 
-	spin_lock(&pt->lock);
-	list_add(&p->lru, &pt->pages);
-	spin_unlock(&pt->lock);
+	INIT_LIST_HEAD(&p->lru);
+	rcu_read_lock();
+	list_lru_add(&pt->pages, &p->lru, page_to_nid(p), NULL);
+	rcu_read_unlock();
 	atomic_long_add(1 << pt->order, &allocated_pages);
 
 	mod_lruvec_page_state(p, NR_GPU_ACTIVE, -num_pages);
 	mod_lruvec_page_state(p, NR_GPU_RECLAIM, num_pages);
 }
 
-/* Take pages from a specific pool_type, return NULL when nothing available */
-static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+static enum lru_status take_one_from_lru(struct list_head *item,
+					 struct list_lru_one *list,
+					 void *cb_arg)
 {
-	struct page *p;
-
-	spin_lock(&pt->lock);
-	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
-	if (p) {
+	struct page **out_page = cb_arg;
+	struct page *p = container_of(item, struct page, lru);
+	list_lru_isolate(list, item);
+	*out_page = p;
+	return LRU_REMOVED;
+}
+
+/* Take pages from a specific pool_type, return NULL when nothing available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, int nid)
+{
+	int ret;
+	struct page *p = NULL;
+	unsigned long nr_to_walk = 1;
+
+	ret = list_lru_walk_node(&pt->pages, nid, take_one_from_lru, (void *)&p, &nr_to_walk);
+	if (ret == 1 && p) {
 		atomic_long_sub(1 << pt->order, &allocated_pages);
 		mod_lruvec_page_state(p, NR_GPU_ACTIVE, (1 << pt->order));
 		mod_lruvec_page_state(p, NR_GPU_RECLAIM, -(1 << pt->order));
-		list_del(&p->lru);
 	}
-	spin_unlock(&pt->lock);
 
 	return p;
 }
@@ -331,25 +352,47 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
 	pt->pool = pool;
 	pt->caching = caching;
 	pt->order = order;
-	spin_lock_init(&pt->lock);
-	INIT_LIST_HEAD(&pt->pages);
+	list_lru_init(&pt->pages);
 
 	spin_lock(&shrinker_lock);
 	list_add_tail(&pt->shrinker_list, &shrinker_list);
 	spin_unlock(&shrinker_lock);
 }
 
+static enum lru_status pool_move_to_dispose_list(struct list_head *item,
+						 struct list_lru_one *list,
+						 void *cb_arg)
+{
+	struct list_head *dispose = cb_arg;
+
+	list_lru_isolate_move(list, item, dispose);
+
+	return LRU_REMOVED;
+}
+
+static void ttm_pool_dispose_list(struct ttm_pool_type *pt,
+				  struct list_head *dispose)
+{
+	while (!list_empty(dispose)) {
+		struct page *p;
+		p = list_first_entry(dispose, struct page, lru);
+		list_del_init(&p->lru);
+		atomic_long_sub(1 << pt->order, &allocated_pages);
+		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
+	}
+}
+
 /* Remove a pool_type from the global shrinker list and free all pages */
 static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
-	struct page *p;
+	LIST_HEAD(dispose);
 
 	spin_lock(&shrinker_lock);
 	list_del(&pt->shrinker_list);
 	spin_unlock(&shrinker_lock);
 
-	while ((p = ttm_pool_type_take(pt)))
-		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
+	list_lru_walk(&pt->pages, pool_move_to_dispose_list, &dispose, LONG_MAX);
+	ttm_pool_dispose_list(pt, &dispose);
 }
 
 /* Return the pool_type to use for the given caching and order */
@@ -399,7 +442,7 @@ static unsigned int ttm_pool_shrink(void)
 	list_move_tail(&pt->shrinker_list, &shrinker_list);
 	spin_unlock(&shrinker_lock);
 
-	p = ttm_pool_type_take(pt);
+	p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
 	if (p) {
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
 		num_pages = 1 << pt->order;
@@ -756,7 +799,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 		p = NULL;
 		pt = ttm_pool_select_type(pool, page_caching, order);
 		if (pt && allow_pools)
-			p = ttm_pool_type_take(pt);
+			p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
 		/*
 		 * If that fails or previously failed, allocate from system.
 		 * Note that this also disallows additional pool allocations using
@@ -1185,16 +1228,7 @@ static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
 /* Count the number of pages available in a pool_type */
 static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
 {
-	unsigned int count = 0;
-	struct page *p;
-
-	spin_lock(&pt->lock);
-	/* Only used for debugfs, the overhead doesn't matter */
-	list_for_each_entry(p, &pt->pages, lru)
-		++count;
-	spin_unlock(&pt->lock);
-
-	return count;
+	return list_lru_count(&pt->pages);
 }
 
 /* Print a nice header for the order */
include/drm/ttm/ttm_pool.h
@@ -29,6 +29,7 @@
 #include <linux/mmzone.h>
 #include <linux/llist.h>
 #include <linux/spinlock.h>
+#include <linux/list_lru.h>
 #include <drm/ttm/ttm_caching.h>
 
 struct device;
@@ -45,8 +46,7 @@ struct ttm_tt;
  * @order: the allocation order our pages have
  * @caching: the caching type our pages have
  * @shrinker_list: our place on the global shrinker list
- * @lock: protection of the page list
- * @pages: the list of pages in the pool
+ * @pages: the lru_list of pages in the pool
  */
 struct ttm_pool_type {
 	struct ttm_pool *pool;
@@ -55,8 +55,7 @@ struct ttm_pool_type {
 
 	struct list_head shrinker_list;
 
-	spinlock_t lock;
-	struct list_head pages;
+	struct list_lru pages;
 };
 
 /**
mm/list_lru.c
@@ -179,6 +179,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 	unlock_list_lru(l, false);
 	return false;
 }
+EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {