mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
ucount: replace get_ucounts_or_wrap() with atomic_inc_not_zero()
get_ucounts_or_wrap() increments the counter and if the counter is negative then it decrements it again in order to reset the previous increment. This statement can be replaced with atomic_inc_not_zero() to only increment the counter if it is not yet 0. This simplifies the get function because the put (if the get failed) can be removed. atomic_inc_not_zero() is implemented as a cmpxchg() loop which can be repeated several times if another get/put is performed in parallel. This will be optimized later. Increment the reference counter only if not yet dropped to zero. Link: https://lkml.kernel.org/r/20250203150525.456525-3-bigeasy@linutronix.de Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Reviewed-by: Paul E. McKenney <paulmck@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Boqun Feng <boqun.feng@gmail.com> Cc: Joel Fernandes <joel@joelfernandes.org> Cc: Josh Triplett <josh@joshtriplett.org> Cc: Lai jiangshan <jiangshanlai@gmail.com> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: Mengen Sun <mengensun@tencent.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com> Cc: YueHong Wu <yuehongwu@tencent.com> Cc: Zqiang <qiang.zhang1211@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
8c6bbda879
commit
328152e677
@@ -146,25 +146,16 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
|
||||
spin_unlock_irq(&ucounts_lock);
|
||||
}
|
||||
|
||||
static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
|
||||
{
|
||||
/* Returns true on a successful get, false if the count wraps. */
|
||||
return !atomic_add_negative(1, &ucounts->count);
|
||||
}
|
||||
|
||||
struct ucounts *get_ucounts(struct ucounts *ucounts)
|
||||
{
|
||||
if (!get_ucounts_or_wrap(ucounts)) {
|
||||
put_ucounts(ucounts);
|
||||
ucounts = NULL;
|
||||
}
|
||||
return ucounts;
|
||||
if (atomic_inc_not_zero(&ucounts->count))
|
||||
return ucounts;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
|
||||
{
|
||||
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
|
||||
bool wrapped;
|
||||
struct ucounts *ucounts, *new = NULL;
|
||||
|
||||
spin_lock_irq(&ucounts_lock);
|
||||
@@ -189,14 +180,11 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
|
||||
return new;
|
||||
}
|
||||
}
|
||||
|
||||
wrapped = !get_ucounts_or_wrap(ucounts);
|
||||
if (!atomic_inc_not_zero(&ucounts->count))
|
||||
ucounts = NULL;
|
||||
spin_unlock_irq(&ucounts_lock);
|
||||
kfree(new);
|
||||
if (wrapped) {
|
||||
put_ucounts(ucounts);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return ucounts;
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user