ublk: optimize ublk_user_copy() on daemon task

ublk user copy syscalls may be issued from any task, so they take a
reference count on the struct ublk_io to check whether it is owned by
the ublk server and prevent a concurrent UBLK_IO_COMMIT_AND_FETCH_REQ
from completing the request. However, if the user copy syscall is issued
on the io's daemon task, a concurrent UBLK_IO_COMMIT_AND_FETCH_REQ isn't
possible, so the atomic reference count dance is unnecessary. Check for
UBLK_IO_FLAG_OWNED_BY_SRV to ensure the request is dispatched to the
server and obtain the request from ublk_io's req field instead of looking
it up on the tagset. Skip the reference count increment and decrement.
Commit 8a8fe42d76 ("ublk: optimize UBLK_IO_REGISTER_IO_BUF on daemon
task") made an analogous optimization for ublk zero copy buffer
registration.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Caleb Sander Mateos
2026-01-08 02:19:39 -07:00
committed by Jens Axboe
parent b2503e936b
commit bfe1255712

View File

@@ -183,7 +183,7 @@ struct ublk_io {
* if user copy or zero copy are enabled:
* - UBLK_REFCOUNT_INIT from dispatch to the server
* until UBLK_IO_COMMIT_AND_FETCH_REQ
* - 1 for each inflight ublk_ch_{read,write}_iter() call
* - 1 for each inflight ublk_ch_{read,write}_iter() call not on task
* - 1 for each io_uring registered buffer not registered on task
* The I/O can only be completed once all references are dropped.
* User copy and buffer registration operations are only permitted
@@ -2698,6 +2698,7 @@ ublk_user_copy(struct kiocb *iocb, struct iov_iter *iter, int dir)
struct ublk_io *io;
unsigned data_len;
bool is_integrity;
bool on_daemon;
size_t buf_off;
u16 tag, q_id;
ssize_t ret;
@@ -2727,9 +2728,20 @@ ublk_user_copy(struct kiocb *iocb, struct iov_iter *iter, int dir)
return -EINVAL;
io = &ubq->ios[tag];
req = __ublk_check_and_get_req(ub, q_id, tag, io);
if (!req)
return -EINVAL;
on_daemon = current == READ_ONCE(io->task);
if (on_daemon) {
/* On daemon, io can't be completed concurrently, so skip ref */
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
return -EINVAL;
req = io->req;
if (!ublk_rq_has_data(req))
return -EINVAL;
} else {
req = __ublk_check_and_get_req(ub, q_id, tag, io);
if (!req)
return -EINVAL;
}
if (is_integrity) {
struct blk_integrity *bi = &req->q->limits.integrity;
@@ -2754,7 +2766,8 @@ ublk_user_copy(struct kiocb *iocb, struct iov_iter *iter, int dir)
ret = ublk_copy_user_pages(req, buf_off, iter, dir);
out:
ublk_put_req_ref(io, req);
if (!on_daemon)
ublk_put_req_ref(io, req);
return ret;
}