io_uring/tctx: clean up __io_uring_add_tctx_node() error handling

Refactor __io_uring_add_tctx_node() so that on error it never leaves
current->io_uring pointing at a half-setup tctx. This moves the
assignment of current->io_uring to the end of the function, past any
failure points.

Separate out the node installation into io_tctx_install_node() to
further clean this up.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Jens Axboe
2026-04-08 11:31:38 -06:00
parent 2c453a4281
commit 7880174e1e

View File

@@ -108,36 +108,15 @@ __cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *tas
return tctx;
}
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
static int io_tctx_install_node(struct io_ring_ctx *ctx,
struct io_uring_task *tctx)
{
struct io_uring_task *tctx = current->io_uring;
struct io_tctx_node *node;
int ret;
if (unlikely(!tctx)) {
tctx = io_uring_alloc_task_context(current, ctx);
if (IS_ERR(tctx))
return PTR_ERR(tctx);
if (xa_load(&tctx->xa, (unsigned long)ctx))
return 0;
current->io_uring = tctx;
if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
unsigned int limits[2] = { ctx->iowq_limits[0],
ctx->iowq_limits[1], };
ret = io_wq_max_workers(tctx->io_wq, limits);
if (ret)
return ret;
}
}
/*
* Re-activate io-wq keepalive on any new io_uring usage. The wq may have
* been marked for idle-exit when the task temporarily had no active
* io_uring instances.
*/
if (tctx->io_wq)
io_wq_set_exit_on_idle(tctx->io_wq, false);
if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
node = kmalloc_obj(*node);
if (!node)
return -ENOMEM;
@@ -154,10 +133,51 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
mutex_lock(&ctx->tctx_lock);
list_add(&node->ctx_node, &ctx->tctx_list);
mutex_unlock(&ctx->tctx_lock);
}
return 0;
}
/*
 * Register the current task with @ctx: make sure the task has a per-task
 * io_uring context (tctx) and that a node for this ring is installed in it.
 *
 * current->io_uring is only assigned after every failure point has been
 * passed, so on error the task never ends up pointing at a half-setup tctx.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	int ret;

	if (unlikely(!tctx)) {
		/* First io_uring use by this task: allocate a fresh tctx. */
		tctx = io_uring_alloc_task_context(current, ctx);
		if (IS_ERR(tctx))
			return PTR_ERR(tctx);
		if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
			/* Apply the ring's configured io-wq worker limits. */
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				goto err_free;
		}
	}
	/*
	 * Re-activate io-wq keepalive on any new io_uring usage. The wq may have
	 * been marked for idle-exit when the task temporarily had no active
	 * io_uring instances.
	 */
	if (tctx->io_wq)
		io_wq_set_exit_on_idle(tctx->io_wq, false);

	ret = io_tctx_install_node(ctx, tctx);
	if (!ret) {
		/* Fully set up -- only now publish the tctx to the task. */
		current->io_uring = tctx;
		return 0;
	}
	/*
	 * Node installation failed. If the tctx was freshly allocated above
	 * (current->io_uring is still unset in that case), tear it down
	 * again. The err_free label sits inside this branch on purpose: the
	 * io_wq_max_workers() failure path jumps here, and it too only runs
	 * for a freshly allocated tctx.
	 */
	if (!current->io_uring) {
err_free:
		io_wq_put_and_exit(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
	}
	return ret;
}
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
int ret;