mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
block: pass io_comp_batch to rq_end_io_fn callback
Add a third parameter 'const struct io_comp_batch *' to the rq_end_io_fn callback signature. This allows end_io handlers to access the completion batch context when requests are completed via blk_mq_end_request_batch(). The io_comp_batch is passed from blk_mq_end_request_batch(), while NULL is passed from __blk_mq_end_request() and blk_mq_put_rq_ref() which don't have batch context. This infrastructure change enables drivers to detect whether they're being called from a batched completion path (like iopoll) and access additional context stored in the io_comp_batch. Update all rq_end_io_fn implementations: - block/blk-mq.c: blk_end_sync_rq - block/blk-flush.c: flush_end_io, mq_flush_data_end_io - drivers/nvme/host/ioctl.c: nvme_uring_cmd_end_io - drivers/nvme/host/core.c: nvme_keep_alive_end_io - drivers/nvme/host/pci.c: abort_endio, nvme_del_queue_end, nvme_del_cq_end - drivers/nvme/target/passthru.c: nvmet_passthru_req_done - drivers/scsi/scsi_error.c: eh_lock_door_done - drivers/scsi/sg.c: sg_rq_end_io - drivers/scsi/st.c: st_scsi_execute_end - drivers/target/target_core_pscsi.c: pscsi_req_done - drivers/md/dm-rq.c: end_clone_request Signed-off-by: Ming Lei <ming.lei@redhat.com> Reviewed-by: Kanchan Joshi <joshi.k@samsung.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
@@ -199,7 +199,8 @@ static void blk_flush_complete_seq(struct request *rq,
 }
 
 static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
-				       blk_status_t error)
+				       blk_status_t error,
+				       const struct io_comp_batch *iob)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running;
@@ -335,7 +336,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 }
 
 static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
-					       blk_status_t error)
+					       blk_status_t error,
+					       const struct io_comp_batch *iob)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -1156,7 +1156,7 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
-		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+		if (rq->end_io(rq, error, NULL) == RQ_END_IO_FREE)
 			blk_mq_free_request(rq);
 	} else {
 		blk_mq_free_request(rq);
@@ -1211,7 +1211,7 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
		 * If end_io handler returns NONE, then it still has
		 * ownership of the request.
		 */
-		if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
+		if (rq->end_io && rq->end_io(rq, 0, iob) == RQ_END_IO_NONE)
 			continue;
 
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
@@ -1458,7 +1458,8 @@ struct blk_rq_wait {
 	blk_status_t ret;
 };
 
-static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret,
+					  const struct io_comp_batch *iob)
 {
 	struct blk_rq_wait *wait = rq->end_io_data;
 
@@ -1688,7 +1689,7 @@ static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expi
 void blk_mq_put_rq_ref(struct request *rq)
 {
 	if (is_flush_rq(rq)) {
-		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+		if (rq->end_io(rq, 0, NULL) == RQ_END_IO_FREE)
 			blk_mq_free_request(rq);
 	} else if (req_ref_put_and_test(rq)) {
 		__blk_mq_free_request(rq);
@@ -295,7 +295,8 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
 }
 
 static enum rq_end_io_ret end_clone_request(struct request *clone,
-					    blk_status_t error)
+					    blk_status_t error,
+					    const struct io_comp_batch *iob)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
@@ -1333,7 +1333,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 }
 
 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
-						 blk_status_t status)
+						 blk_status_t status,
+						 const struct io_comp_batch *iob)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
 	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
@@ -410,7 +410,8 @@ static void nvme_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw)
 }
 
 static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
-						blk_status_t err)
+						blk_status_t err,
+						const struct io_comp_batch *iob)
 {
 	struct io_uring_cmd *ioucmd = req->end_io_data;
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
@@ -1615,7 +1615,8 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error,
+				      const struct io_comp_batch *iob)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 
@@ -2858,7 +2859,8 @@ out_unlock:
 }
 
 static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
-					     blk_status_t error)
+					     blk_status_t error,
+					     const struct io_comp_batch *iob)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -2868,14 +2870,15 @@ static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
 }
 
 static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
-					  blk_status_t error)
+					  blk_status_t error,
+					  const struct io_comp_batch *iob)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
 
 	if (error)
 		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
 
-	return nvme_del_queue_end(req, error);
+	return nvme_del_queue_end(req, error, iob);
 }
 
 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
@@ -247,7 +247,8 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 }
 
 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
-						  blk_status_t blk_status)
+						  blk_status_t blk_status,
+						  const struct io_comp_batch *iob)
 {
 	struct nvmet_req *req = rq->end_io_data;
 
@@ -2085,7 +2085,8 @@ maybe_retry:
 }
 
 static enum rq_end_io_ret eh_lock_door_done(struct request *req,
-					    blk_status_t status)
+					    blk_status_t status,
+					    const struct io_comp_batch *iob)
 {
 	blk_mq_free_request(req);
 	return RQ_END_IO_NONE;
@@ -177,7 +177,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 } Sg_device;
 
 /* tasklet or soft irq callback */
-static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status,
+				       const struct io_comp_batch *iob);
 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static int sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -1309,7 +1310,8 @@ sg_rq_end_io_usercontext(struct work_struct *work)
  * level when a command is completed (or has failed).
  */
 static enum rq_end_io_ret
-sg_rq_end_io(struct request *rq, blk_status_t status)
+sg_rq_end_io(struct request *rq, blk_status_t status,
+	     const struct io_comp_batch *iob)
 {
 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
 	struct sg_request *srp = rq->end_io_data;
@@ -525,7 +525,8 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
 }
 
 static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
-					      blk_status_t status)
+					      blk_status_t status,
+					      const struct io_comp_batch *iob)
 {
 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
 	struct st_request *SRpnt = req->end_io_data;
@@ -39,7 +39,8 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 }
 
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t,
+					 const struct io_comp_batch *);
 
 /* pscsi_attach_hba():
  *
@@ -1001,7 +1002,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 }
 
 static enum rq_end_io_ret pscsi_req_done(struct request *req,
-					 blk_status_t status)
+					 blk_status_t status,
+					 const struct io_comp_batch *iob)
 {
 	struct se_cmd *cmd = req->end_io_data;
 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
@@ -13,6 +13,7 @@
 
 struct blk_mq_tags;
 struct blk_flush_queue;
+struct io_comp_batch;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_DEFAULT_RQ	128
@@ -22,7 +23,8 @@ enum rq_end_io_ret {
 	RQ_END_IO_FREE,
 };
 
-typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t,
+					  const struct io_comp_batch *);
 
 /*
  * request flags */
Reference in New Issue
Block a user