mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
There are reports where io_uring instance removal takes too long and an ifq reallocation by another zcrx instance fails. Split zcrx destruction into two steps, similarly to how it was before: first close the queue early but keep the zcrx alive, and then, when all inflight requests are completed, drop the main zcrx reference. For extra protection, mark terminated zcrx instances in the xarray and warn if we double put them. Cc: stable@vger.kernel.org # 6.19+ Link: https://github.com/axboe/liburing/issues/1550 Reported-by: Youngmin Choi <youngminchoi94@gmail.com> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://patch.msgid.link/0ce21f0565ab4358668922a28a8a36922dfebf76.1774261953.git.asml.silence@gmail.com [axboe: NULL ifq before break inside scoped guard] Signed-off-by: Jens Axboe <axboe@kernel.dk>
117 lines
2.9 KiB
C
117 lines
2.9 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#ifndef IOU_ZC_RX_H
|
|
#define IOU_ZC_RX_H
|
|
|
|
#include <linux/io_uring_types.h>
|
|
#include <linux/dma-buf.h>
|
|
#include <linux/socket.h>
|
|
#include <net/page_pool/types.h>
|
|
#include <net/net_trackers.h>
|
|
|
|
/* Registration flags accepted by zcrx ifq registration. */
#define ZCRX_SUPPORTED_REG_FLAGS	(ZCRX_REG_IMPORT)
/* Feature bits this kernel's zcrx implementation advertises. */
#define ZCRX_FEATURES			(ZCRX_FEATURE_RX_PAGE_SIZE)
|
|
|
|
/*
 * Backing memory for a zero-copy RX area. The backing is either user
 * memory (pages) or an imported dma-buf; is_dmabuf selects which of the
 * two field groups below is in use.
 */
struct io_zcrx_mem {
	unsigned long			size;
	bool				is_dmabuf;

	/* user-memory backing: page array and its scatterlist mapping */
	struct page			**pages;
	unsigned long			nr_folios;
	struct sg_table			page_sg_table;
	/* pages charged against the user's memlock accounting — TODO confirm */
	unsigned long			account_pages;
	struct sg_table			*sgt;

	/* dma-buf backing: attachment and the buffer it belongs to */
	struct dma_buf_attachment	*attach;
	struct dma_buf			*dmabuf;
};
|
|
|
|
/*
 * A contiguous region of net_iovs handed out to the page pool, built on
 * top of an io_zcrx_mem backing and owned by a single io_zcrx_ifq.
 */
struct io_zcrx_area {
	struct net_iov_area	nia;		/* embedded net_iov bookkeeping */
	struct io_zcrx_ifq	*ifq;		/* owning interface queue */
	atomic_t		*user_refs;	/* per-niov userspace refcounts */

	bool			is_mapped;	/* DMA mapping established */
	u16			area_id;

	/* freelist */
	spinlock_t		freelist_lock ____cacheline_aligned_in_smp;
	u32			free_count;	/* number of entries in freelist */
	u32			*freelist;	/* stack of free niov indices — TODO confirm */

	struct io_zcrx_mem	mem;		/* backing memory for this area */
};
|
|
|
|
/*
 * A zero-copy RX interface queue: ties one io_zcrx_area to a netdev RX
 * queue and carries the userspace-visible refill ring.
 */
struct io_zcrx_ifq {
	struct io_zcrx_area		*area;
	unsigned			niov_shift;	/* log2 of niov (rx page) size — TODO confirm */
	struct user_struct		*user;
	struct mm_struct		*mm_account;	/* mm charged for this ifq's memory */

	/* refill ring shared with userspace, protected by rq_lock */
	spinlock_t			rq_lock ____cacheline_aligned_in_smp;
	struct io_uring			*rq_ring;
	struct io_uring_zcrx_rqe	*rqes;
	u32				cached_rq_head;
	u32				rq_entries;

	u32				if_rxq;		/* bound netdev RX queue index */
	struct device			*dev;
	struct net_device		*netdev;
	netdevice_tracker		netdev_tracker;
	refcount_t			refs;
	/* counts userspace facing users like io_uring */
	refcount_t			user_refs;

	/*
	 * Page pool and net configuration lock, can be taken deeper in the
	 * net stack.
	 */
	struct mutex			pp_lock;
	struct io_mapped_region		region;		/* mmap'able region for the refill ring */
};
|
|
|
|
#if defined(CONFIG_IO_URING_ZCRX)
/* Dispatch a zcrx control command from userspace. */
int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_arg);
/* Register a new zcrx interface queue described by @arg. */
int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
			 struct io_uring_zcrx_ifq_reg __user *arg);
/* Tear down all zcrx ifqs registered on @ctx. */
void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx);
/* Early-terminate zcrx instances (e.g. on ring exit) ahead of final put. */
void io_terminate_zcrx(struct io_ring_ctx *ctx);
/* Receive into zero-copy buffers from @sock; @len is in/out. */
int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
		 struct socket *sock, unsigned int flags,
		 unsigned issue_flags, unsigned int *len);
/* Look up the mmap'able region of the zcrx instance with the given @id. */
struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
					    unsigned int id);
#else
/* Stubs for !CONFIG_IO_URING_ZCRX: registration/ctrl/recv report no support. */
static inline int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
				       struct io_uring_zcrx_ifq_reg __user *arg)
{
	return -EOPNOTSUPP;
}
static inline void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
{
}
static inline void io_terminate_zcrx(struct io_ring_ctx *ctx)
{
}
static inline int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
			       struct socket *sock, unsigned int flags,
			       unsigned issue_flags, unsigned int *len)
{
	return -EOPNOTSUPP;
}
static inline struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
							  unsigned int id)
{
	return NULL;
}
static inline int io_zcrx_ctrl(struct io_ring_ctx *ctx,
			       void __user *arg, unsigned nr_arg)
{
	return -EOPNOTSUPP;
}
#endif
|
|
|
|
/* Opcode handlers for IORING_OP_RECV_ZC: issue and SQE prep. */
int io_recvzc(struct io_kiocb *req, unsigned int issue_flags);
int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
|
|
|
#endif
|