mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
Commit 9618908026 addressed one case of ctx->rings being potentially accessed while a resize is happening on the ring, but there are still a few others that need handling. Add a helper for retrieving the rings associated with an io_uring context, and add some sanity checking to that to catch bad uses. ->rings_rcu is always valid, as long as it's used within RCU read lock. Any use of ->rings_rcu or ->rings inside either ->uring_lock or ->completion_lock is sane as well. Do the minimum fix for the current kernel, but set it up such that this basic infra can be extended for later kernels to make this harder to mess up in the future. Thanks to Junxi Qian for finding and debugging this issue. Cc: stable@vger.kernel.org Fixes: 79cfe9e59c ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS") Reviewed-by: Junxi Qian <qjx1298677004@gmail.com> Tested-by: Junxi Qian <qjx1298677004@gmail.com> Link: https://lore.kernel.org/io-uring/20260330172348.89416-1-qjx1298677004@gmail.com/ Signed-off-by: Jens Axboe <axboe@kernel.dk>
53 lines
1.3 KiB
C
53 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#ifndef IOU_WAIT_H
|
|
#define IOU_WAIT_H
|
|
|
|
#include <linux/io_uring_types.h>
|
|
|
|
/*
|
|
* No waiters. It's larger than any valid value of the tw counter
|
|
* so that tests against ->cq_wait_nr would fail and skip wake_up().
|
|
*/
|
|
#define IO_CQ_WAKE_INIT (-1U)
|
|
/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
|
|
#define IO_CQ_WAKE_FORCE (IO_CQ_WAKE_INIT >> 1)
|
|
|
|
/*
 * Kernel-side, parsed form of the extended wait argument supplied by
 * userspace for CQ ring waits. Field semantics below are inferred from
 * names/types — confirm against the io_cqring_wait() implementation.
 */
struct ext_arg {
	size_t argsz;			/* size of the user-supplied arg struct */
	struct timespec64 ts;		/* wait timeout; only meaningful if ts_set */
	const sigset_t __user *sig;	/* optional user signal mask, may be NULL */
	ktime_t min_time;		/* presumably a minimum wait/batch time — verify */
	bool ts_set;			/* true if userspace provided a timeout in ts */
	bool iowait;			/* presumably: account sleep as iowait — verify */
};
|
|
|
|
/*
 * Wait for CQ ring completions; presumably blocks until at least
 * min_events CQEs are available, honoring ext_arg (timeout/sigmask).
 */
int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
		   struct ext_arg *ext_arg);
/* Run pending task_work; nonzero return indicates a signal/error — verify */
int io_run_task_work_sig(struct io_ring_ctx *ctx);
/* Flush overflowed CQEs back into the CQ ring */
void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx);
|
|
|
|
static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
|
|
{
|
|
struct io_rings *rings = io_get_rings(ctx);
|
|
return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
|
|
}
|
|
|
|
static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
|
|
{
|
|
struct io_rings *rings = io_get_rings(ctx);
|
|
|
|
return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
|
|
}
|
|
|
|
/*
 * Reads the tail/head of the CQ ring while providing an acquire ordering,
 * see comment at top of io_uring.c.
 *
 * Returns the number of posted-but-unconsumed CQEs, as computed by
 * __io_cqring_events().
 */
static inline unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* read barrier before the head load in __io_cqring_events() */
	smp_rmb();
	return __io_cqring_events(ctx);
}
|
|
|
|
#endif
|