drm/msm: Avoid mutex in shrinker_count()

When the system is under heavy memory pressure, we can end up with lots
of concurrent calls into the shrinker.  Keeping a running tab on what we
can shrink avoids grabbing a lock in shrinker->count(), and avoids
shrinker->scan() getting called when not profitable.

Also, we can keep purged objects in their own list so they are not
re-traversed, which further cuts down the time spent in the critical section.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Tested-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/20210401012722.527712-3-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
Author: Rob Clark
Date: 2021-03-31 18:27:19 -07:00
parent bc90dc33c4
commit cc8a4d5a1b
5 changed files with 81 additions and 27 deletions


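The first hunk below replaces the locked list walk in shrinker->count() with a
read of a running total, priv->shrinkable_count. The sketch here illustrates
the bookkeeping that keeps such a counter accurate; shrinkable_count and the
inactive_dontneed list come from the diff itself, but the helper names and the
exact call sites are assumptions for illustration, since the real updates live
in the other changed files of this commit, which are not part of this excerpt.

static void
mark_purgeable(struct msm_drm_private *priv, struct msm_gem_object *msm_obj)
{
	/* called with priv->mm_lock held when the BO becomes MADV_DONTNEED */
	list_move_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
}

static void
mark_unpurgeable(struct msm_drm_private *priv, struct msm_gem_object *msm_obj)
{
	/* called with priv->mm_lock held when the BO is made MADV_WILLNEED
	 * again, purged, or freed: its pages are no longer reclaimable
	 */
	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
}

Because every update happens under mm_lock, the single unlocked read in
msm_gem_shrinker_count() may be momentarily stale, which is acceptable: the
shrinker core only uses the count as a hint for how much scanning is worth
attempting.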
@@ -14,22 +14,7 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
-	struct msm_gem_object *msm_obj;
-	unsigned long count = 0;
-
-	mutex_lock(&priv->mm_lock);
-
-	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
-		if (!msm_gem_trylock(&msm_obj->base))
-			continue;
-		if (is_purgeable(msm_obj))
-			count += msm_obj->base.size >> PAGE_SHIFT;
-		msm_gem_unlock(&msm_obj->base);
-	}
-
-	mutex_unlock(&priv->mm_lock);
-
-	return count;
+	return priv->shrinkable_count;
 }
 
 static unsigned long
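The second idea from the commit message, keeping purged objects on their own
list, is not visible in the hunks shown here either; it lands in the other
changed files. A minimal sketch of what that could look like inside the scan
path follows, under stated assumptions: the inactive_purged list, the function
name, and the loop shape are illustrative only, and the actual page release is
elided.

static unsigned long
purge_dontneed(struct msm_drm_private *priv, unsigned long nr_to_scan)
{
	struct msm_gem_object *msm_obj, *tmp;
	unsigned long freed = 0;

	mutex_lock(&priv->mm_lock);

	list_for_each_entry_safe(msm_obj, tmp, &priv->inactive_dontneed, mm_list) {
		if (freed >= nr_to_scan)
			break;
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
		if (is_purgeable(msm_obj)) {
			/* ... release the backing pages (elided) ... */
			freed += msm_obj->base.size >> PAGE_SHIFT;
			priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
			/* now purged: park it on its own list so later
			 * shrinker passes never walk it again
			 */
			list_move_tail(&msm_obj->mm_list, &priv->inactive_purged);
		}
		msm_gem_unlock(&msm_obj->base);
	}

	mutex_unlock(&priv->mm_lock);

	return freed;
}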
@@ -45,6 +30,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
 		if (freed >= sc->nr_to_scan)
 			break;
+		/* Use trylock, because we cannot block on a obj that
+		 * might be trying to acquire mm_lock
+		 */
 		if (!msm_gem_trylock(&msm_obj->base))
 			continue;
 		if (is_purgeable(msm_obj)) {
@@ -56,8 +44,11 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	mutex_unlock(&priv->mm_lock);
 
-	if (freed > 0)
+	if (freed > 0) {
 		trace_msm_gem_purge(freed << PAGE_SHIFT);
+	} else {
+		return SHRINK_STOP;
+	}
 
 	return freed;
 }
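Returning SHRINK_STOP instead of 0 when nothing could be freed tells the
shrinker core that this shrinker cannot make progress right now, so it stops
calling scan() again in the same reclaim pass. A minimal sketch of the general
pattern, where try_to_reclaim_pages() is a hypothetical stand-in for the
driver-specific reclaim:

static unsigned long
example_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	/* try_to_reclaim_pages() is a stand-in for driver-specific reclaim */
	unsigned long freed = try_to_reclaim_pages(sc->nr_to_scan);

	/* 0 means "scanned but freed nothing this time"; SHRINK_STOP means
	 * "no progress is possible, stop calling me for this reclaim pass"
	 */
	return freed ? freed : SHRINK_STOP;
}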
@@ -75,6 +66,9 @@ vmap_shrink(struct list_head *mm_list)
 	unsigned unmapped = 0;
 
 	list_for_each_entry(msm_obj, mm_list, mm_list) {
+		/* Use trylock, because we cannot block on a obj that
+		 * might be trying to acquire mm_lock
+		 */
 		if (!msm_gem_trylock(&msm_obj->base))
 			continue;
 		if (is_vunmapable(msm_obj)) {
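The trylock comments added in the two loops above describe a lock-ordering
problem: the shrinker already holds mm_lock and then wants each object's lock,
while other paths may hold an object's lock and then need mm_lock, for example
to move the object between the inactive lists. A blocking lock in the shrinker
could therefore deadlock under memory pressure. The sketch below illustrates
the "other side" of that inversion; the function name, its body, and the
willneed/dontneed list choice are illustrative, not code from this commit.

static void
update_madv_locked(struct msm_drm_private *priv, struct msm_gem_object *msm_obj,
		   bool dontneed)
{
	/* caller already holds the object lock (msm_obj->base) ... */
	mutex_lock(&priv->mm_lock);	/* ... and now takes mm_lock */
	list_move_tail(&msm_obj->mm_list,
		       dontneed ? &priv->inactive_dontneed
				: &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);
}

The shrinker acquires the same two locks in the opposite order: mm_lock first,
then each object's lock. With a blocking lock, one thread could hold mm_lock
while waiting for the object lock and another hold the object lock while
waiting for mm_lock, a classic AB-BA deadlock. msm_gem_trylock() plus continue
breaks the cycle by simply skipping objects whose lock is contended.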