maple_tree: remove mas_destroy() from mas_nomem()

Separate the call to mas_destroy() from mas_nomem() so that we can check
for no-memory errors without destroying the current maple state in
mas_store_gfp().  Calls to mas_destroy() are then added to the callers of
mas_nomem().

Link: https://lkml.kernel.org/r/20240814161944.55347-6-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3cd9e92e00 (parent 5d659bbb52)
Author:    Sidhartha Kumar, 2024-08-14 12:19:32 -04:00
Committer: Andrew Morton
2 changed files with 31 additions and 18 deletions
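With mas_destroy() gone from mas_nomem(), a retry loop no longer loses its
maple state between attempts; each caller instead tears the state down
exactly once when it is finished, on both the success and the error path.
A minimal sketch of the resulting calling convention — store_with_retry()
is a made-up name for illustration; the body mirrors what
mtree_insert_range() looks like after this patch:

	/* Sketch only: illustrates the caller contract after this patch. */
	int store_with_retry(struct maple_tree *mt, unsigned long first,
			     unsigned long last, void *entry, gfp_t gfp)
	{
		MA_STATE(mas, mt, first, last);
		int ret = 0;

		mtree_lock(mt);
	retry:
		mas_insert(&mas, entry);	/* may set mas.node to MA_ERROR(-ENOMEM) */
		if (mas_nomem(&mas, gfp))	/* allocated more nodes: try again */
			goto retry;
		mtree_unlock(mt);

		if (mas_is_err(&mas))
			ret = xa_err(mas.node);

		mas_destroy(&mas);		/* caller-side teardown, exactly once */
		return ret;
	}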

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4519,6 +4519,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
 	if (*next == 0)
 		mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
 
+	mas_destroy(mas);
 	return ret;
 }
 EXPORT_SYMBOL(mas_alloc_cyclic);
@@ -5601,21 +5602,25 @@ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
 	unsigned long index = mas->index;
 	unsigned long last = mas->last;
 	MA_WR_STATE(wr_mas, mas, entry);
+	int ret = 0;
 
-	mas_wr_store_setup(&wr_mas);
-	trace_ma_write(__func__, mas, 0, entry);
 retry:
-	mas_wr_store_entry(&wr_mas);
+	mas_wr_preallocate(&wr_mas, entry);
 	if (unlikely(mas_nomem(mas, gfp))) {
 		if (!entry)
 			__mas_set_range(mas, index, last);
 		goto retry;
 	}
 
-	if (unlikely(mas_is_err(mas)))
-		return xa_err(mas->node);
+	if (mas_is_err(mas)) {
+		ret = xa_err(mas->node);
+		goto out;
+	}
 
-	return 0;
+	mas_wr_store_entry(&wr_mas);
+out:
+	mas_destroy(mas);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(mas_store_gfp);
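Putting the hunk together, mas_store_gfp() reads as below once the change
is applied (reconstructed from the + lines above).  The store is only
attempted after preallocation has succeeded, and both the success and the
error path funnel through the single mas_destroy() at the out: label:

	int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
	{
		unsigned long index = mas->index;
		unsigned long last = mas->last;
		MA_WR_STATE(wr_mas, mas, entry);
		int ret = 0;

	retry:
		mas_wr_preallocate(&wr_mas, entry);
		if (unlikely(mas_nomem(mas, gfp))) {
			/* A NULL store may have widened the range; reset it. */
			if (!entry)
				__mas_set_range(mas, index, last);
			goto retry;
		}

		if (mas_is_err(mas)) {
			ret = xa_err(mas->node);
			goto out;
		}

		mas_wr_store_entry(&wr_mas);
	out:
		mas_destroy(mas);	/* frees any unused preallocated nodes */
		return ret;
	}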
@@ -6374,6 +6379,7 @@ write_retry:
 		goto write_retry;
 	}
 
+	mas_destroy(mas);
 	return entry;
 }
 EXPORT_SYMBOL_GPL(mas_erase);
@@ -6388,10 +6394,8 @@ EXPORT_SYMBOL_GPL(mas_erase);
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
__must_hold(mas->tree->ma_lock)
{
if (likely(mas->node != MA_ERROR(-ENOMEM))) {
mas_destroy(mas);
if (likely(mas->node != MA_ERROR(-ENOMEM)))
return false;
}
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
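For contrast, the head of mas_nomem() after this hunk reduces to a pure
check (sketch reconstructed from the hunk; the -ENOMEM recovery tail that
begins at the gfpflags_allow_blocking() test above is unchanged and
elided here):

	bool mas_nomem(struct ma_state *mas, gfp_t gfp)
		__must_hold(mas->tree->ma_lock)
	{
		/*
		 * Anything other than -ENOMEM means there is nothing to
		 * retry: report false and leave the maple state intact,
		 * since mas_destroy() is now the caller's responsibility.
		 */
		if (likely(mas->node != MA_ERROR(-ENOMEM)))
			return false;
		/* ... unlock, allocate, relock, and retry as before ... */
	}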
@@ -6469,6 +6473,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
 {
 	MA_STATE(mas, mt, index, last);
 	MA_WR_STATE(wr_mas, &mas, entry);
+	int ret = 0;
 
 	trace_ma_write(__func__, &mas, 0, entry);
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
@@ -6484,10 +6489,12 @@ retry:
 		goto retry;
 
 	mtree_unlock(mt);
-	if (mas_is_err(&mas))
-		return xa_err(mas.node);
-
-	return 0;
+
+	if (mas_is_err(&mas))
+		ret = xa_err(mas.node);
+
+	mas_destroy(&mas);
+	return ret;
 }
 EXPORT_SYMBOL(mtree_store_range);
@@ -6523,6 +6530,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first,
 		unsigned long last, void *entry, gfp_t gfp)
 {
 	MA_STATE(ms, mt, first, last);
+	int ret = 0;
 
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;
@@ -6538,9 +6546,10 @@ retry:
 
 	mtree_unlock(mt);
 	if (mas_is_err(&ms))
-		return xa_err(ms.node);
+		ret = xa_err(ms.node);
 
-	return 0;
+	mas_destroy(&ms);
+	return ret;
 }
 EXPORT_SYMBOL(mtree_insert_range);
EXPORT_SYMBOL(mtree_insert_range);
@@ -6595,6 +6604,7 @@ retry:
 unlock:
 	mtree_unlock(mt);
+	mas_destroy(&mas);
 	return ret;
 }
 EXPORT_SYMBOL(mtree_alloc_range);
@@ -6676,6 +6686,7 @@ retry:
 unlock:
 	mtree_unlock(mt);
+	mas_destroy(&mas);
 	return ret;
 }
 EXPORT_SYMBOL(mtree_alloc_rrange);