io_uring/zctx: unify zerocopy issue variants

io_send_zc and io_sendmsg_zc started out different, but by now the only
real difference between them is how registered buffers are imported and
which net helper is used for the send. Avoid the duplication and
combine them into a single function.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Pavel Begunkov
Date:      2026-02-16 11:45:55 +00:00
Committer: Jens Axboe
Parent:    2f9965f5d5
Commit:    403fec55bf

3 changed files with 17 additions and 78 deletions
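For context, the unification leaves the user-visible contract of zerocopy sends unchanged: the request posts a result CQE flagged IORING_CQE_F_MORE, followed by a notification CQE once the kernel is done with the buffer. A minimal liburing sketch of that flow (function name and setup are illustrative, error handling omitted, connected socket fd assumed):

#include <liburing.h>

/* Submit one zerocopy send and reap both completions. Sketch only:
 * assumes a connected socket `fd` and a reasonably recent liburing;
 * all error handling is omitted for brevity. */
static int send_zc_once(int fd, const void *buf, size_t len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int res;

	io_uring_queue_init(8, &ring, 0);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_send_zc(sqe, fd, buf, len, 0, 0);
	io_uring_submit(&ring);

	/* First CQE: the send result, with IORING_CQE_F_MORE set to
	 * announce the upcoming notification. */
	io_uring_wait_cqe(&ring, &cqe);
	res = cqe->res;
	io_uring_cqe_seen(&ring, cqe);

	/* Second CQE: the notification (IORING_CQE_F_NOTIF); only after
	 * this may `buf` be reused. */
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return res;
}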


@@ -1471,72 +1471,6 @@ static int io_send_zc_import(struct io_kiocb *req,
 	return 0;
 }
 
-int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
-{
-	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg = req->async_data;
-	struct socket *sock;
-	unsigned msg_flags;
-	int ret, min_ret = 0;
-
-	sock = sock_from_file(req->file);
-	if (unlikely(!sock))
-		return -ENOTSOCK;
-	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
-		return -EOPNOTSUPP;
-	if (!(req->flags & REQ_F_POLLED) &&
-	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
-		return -EAGAIN;
-
-	if (req->flags & REQ_F_IMPORT_BUFFER) {
-		ret = io_send_zc_import(req, kmsg, issue_flags);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	msg_flags = zc->msg_flags;
-	if (issue_flags & IO_URING_F_NONBLOCK)
-		msg_flags |= MSG_DONTWAIT;
-	if (msg_flags & MSG_WAITALL)
-		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
-	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
-
-	kmsg->msg.msg_flags = msg_flags;
-	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
-	ret = sock_sendmsg(sock, &kmsg->msg);
-
-	if (unlikely(ret < min_ret)) {
-		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-			return -EAGAIN;
-
-		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
-			zc->done_io += ret;
-			return -EAGAIN;
-		}
-		if (ret == -ERESTARTSYS)
-			ret = -EINTR;
-		req_set_fail(req);
-	}
-
-	if (ret >= 0)
-		ret += zc->done_io;
-	else if (zc->done_io)
-		ret = zc->done_io;
-
-	/*
-	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
-	 * flushing notif to io_send_zc_cleanup()
-	 */
-	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-		io_notif_flush(zc->notif);
-		zc->notif = NULL;
-		io_req_msg_cleanup(req, 0);
-	}
-	io_req_set_res(req, ret, IORING_CQE_F_MORE);
-	return IOU_COMPLETE;
-}
-
 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -1545,37 +1479,43 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	unsigned msg_flags;
 	int ret, min_ret = 0;
 
-	if (req->flags & REQ_F_IMPORT_BUFFER) {
-		ret = io_send_zc_import(req, kmsg, issue_flags);
-		if (unlikely(ret))
-			return ret;
-	}
-
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
 		return -EOPNOTSUPP;
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;
 
+	if (req->flags & REQ_F_IMPORT_BUFFER) {
+		ret = io_send_zc_import(req, kmsg, issue_flags);
+		if (unlikely(ret))
+			return ret;
+	}
+
 	msg_flags = sr->msg_flags;
 	if (issue_flags & IO_URING_F_NONBLOCK)
 		msg_flags |= MSG_DONTWAIT;
 	if (msg_flags & MSG_WAITALL)
 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
-	kmsg->msg.msg_control_user = sr->msg_control;
 	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
-	ret = __sys_sendmsg_sock(sock, &kmsg->msg, msg_flags);
+
+	if (req->opcode == IORING_OP_SEND_ZC) {
+		msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+		kmsg->msg.msg_flags = msg_flags;
+		ret = sock_sendmsg(sock, &kmsg->msg);
+	} else {
+		kmsg->msg.msg_control_user = sr->msg_control;
+		ret = __sys_sendmsg_sock(sock, &kmsg->msg, msg_flags);
+	}
 
 	if (unlikely(ret < min_ret)) {
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 			return -EAGAIN;
 
-		if (ret > 0 && io_net_retry(sock, msg_flags)) {
+		if (ret > 0 && io_net_retry(sock, sr->msg_flags)) {
 			sr->done_io += ret;
 			return -EAGAIN;
 		}
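After this hunk, IORING_OP_SEND_ZC takes the sock_sendmsg() branch and IORING_OP_SENDMSG_ZC the __sys_sendmsg_sock() branch of the same function. A hedged userspace counterpart for the msghdr variant, assuming liburing's io_uring_prep_sendmsg_zc() helper (names illustrative, error handling omitted):

#include <liburing.h>
#include <sys/socket.h>

/* Queue a zerocopy sendmsg. With this patch it reaches the same kernel
 * issue function as SEND_ZC, selected by req->opcode; this variant also
 * carries msg_control. Sketch only: `msg` and its iovecs must stay live
 * until the notification CQE arrives. */
static void queue_sendmsg_zc(struct io_uring *ring, int fd,
			     struct msghdr *msg)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_sendmsg_zc(sqe, fd, msg, 0);
	/* io_uring_submit(ring) and CQE handling as in the sketch above */
}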


@@ -50,7 +50,6 @@ void io_socket_bpf_populate(struct io_uring_bpf_ctx *bctx, struct io_kiocb *req)
 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_connect(struct io_kiocb *req, unsigned int issue_flags);
 
-int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 void io_send_zc_cleanup(struct io_kiocb *req);


@@ -437,7 +437,7 @@ const struct io_issue_def io_issue_defs[] = {
 #if defined(CONFIG_NET)
 		.async_size		= sizeof(struct io_async_msghdr),
 		.prep			= io_send_zc_prep,
-		.issue			= io_send_zc,
+		.issue			= io_sendmsg_zc,
 #else
 		.prep			= io_eopnotsupp_prep,
 #endif
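The opdef hunk is where the unification lands: both zerocopy opcodes now share a single .issue callback, which tells the requests apart via req->opcode. A self-contained toy model of that dispatch pattern (all names here are illustrative, not the kernel's):

#include <stdio.h>

/* Toy model of sharing one issue handler across two opcodes, as the
 * opdef change above does. Illustration only; not kernel code. */
enum op { OP_SEND_ZC, OP_SENDMSG_ZC, OP_MAX };

struct request { enum op opcode; };

static int sendmsg_zc_issue(struct request *req)
{
	if (req->opcode == OP_SEND_ZC)
		return puts("plain-buffer path (sock_sendmsg analogue)");
	return puts("msghdr path (__sys_sendmsg_sock analogue)");
}

/* Both table slots point at the same handler. */
static int (* const issue_tbl[OP_MAX])(struct request *) = {
	[OP_SEND_ZC]	= sendmsg_zc_issue,
	[OP_SENDMSG_ZC]	= sendmsg_zc_issue,
};

int main(void)
{
	struct request r1 = { OP_SEND_ZC }, r2 = { OP_SENDMSG_ZC };

	issue_tbl[r1.opcode](&r1);
	issue_tbl[r2.opcode](&r2);
	return 0;
}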