Merge tag 'for-6.14/block-20250118' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:
- NVMe pull requests via Keith:
     - Target support for PCI-Endpoint transport (Damien)
     - TCP IO queue spreading fixes (Sagi, Chaitanya)
     - Target handling for "limited retry" flags (Guixen)
     - Poll type fix (Yongsoo)
     - Xarray storage error handling (Keisuke)
     - Host memory buffer free size fix on error (Francis)

- MD pull requests via Song:
     - Reintroduce md-linear (Yu Kuai)
     - md-bitmap refactor and fix (Yu Kuai)
     - Replace kmap_atomic with kmap_local_page (David Reaver)

- Quite a few queue freeze and debugfs deadlock fixes

  Ming introduced lockdep support for this in the 6.13 kernel, and it
  has (unsurprisingly) uncovered quite a few issues

- Use const attributes for IO schedulers

- Remove bio ioprio wrappers (see the sketch after this list)

- Fixes for stacked device atomic write support

- Refactor queue affinity helpers, in preparation for better supporting
  isolated CPUs

- Cleanups of loop O_DIRECT handling

- Cleanup of BLK_MQ_F_* flags

- Add rotational support for null_blk

- Various fixes and cleanups
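
[Editor's sketch for the "bio ioprio wrappers" item: a minimal before/after illustration. It assumes the removed helpers were the bio_prio()/bio_set_prio() accessors from <linux/bio.h>, with callers now reading and writing bio->bi_ioprio directly; the function below is illustrative only and not part of this pull.]

    #include <linux/bio.h>

    /* Illustrative only: propagate I/O priority from one bio to another. */
    static inline void example_copy_ioprio(struct bio *dst, const struct bio *src)
    {
    	/*
    	 * Previously: bio_set_prio(dst, bio_prio(src));
    	 * The wrappers were thin accessors around ->bi_ioprio, so callers
    	 * now open-code the field access instead.
    	 */
    	dst->bi_ioprio = src->bi_ioprio;
    }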
* tag 'for-6.14/block-20250118' of git://git.kernel.dk/linux: (106 commits)
block: Don't trim an atomic write
block: Add common atomic writes enable flag
md/md-linear: Fix a NULL vs IS_ERR() bug in linear_add()
block: limit disk max sectors to (LLONG_MAX >> 9)
block: Change blk_stack_atomic_writes_limits() unit_min check
block: Ensure start sector is aligned for stacking atomic writes
blk-mq: Move more error handling into blk_mq_submit_bio()
block: Reorder the request allocation code in blk_mq_submit_bio()
nvme: fix bogus kzalloc() return check in nvme_init_effects_log()
md/md-bitmap: move bitmap_{start, end}write to md upper layer
md/raid5: implement pers->bitmap_sector()
md: add a new callback pers->bitmap_sector()
md/md-bitmap: remove the last parameter for bimtap_ops->endwrite()
md/md-bitmap: factor behind write counters out from bitmap_{start/end}write()
md: Replace deprecated kmap_atomic() with kmap_local_page()
md: reintroduce md-linear
partitions: ldm: remove the initial kernel-doc notation
blk-cgroup: rwstat: fix kernel-doc warnings in header file
blk-cgroup: fix kernel-doc warnings in header file
nbd: fix partial sending
...
drivers/nvme/target/nvmet.h

@@ -245,6 +245,8 @@ struct nvmet_ctrl {
 	struct nvmet_subsys	*subsys;
 	struct nvmet_sq		**sqs;
 
+	void			*drvdata;
+
 	bool			reset_tbkas;
 
 	struct mutex		lock;
@@ -331,6 +333,8 @@ struct nvmet_subsys {
 	struct config_group	namespaces_group;
 	struct config_group	allowed_hosts_group;
 
+	u16			vendor_id;
+	u16			subsys_vendor_id;
 	char			*model_number;
 	u32			ieee_oui;
 	char			*firmware_rev;
@@ -411,6 +415,18 @@ struct nvmet_fabrics_ops {
 	void (*discovery_chg)(struct nvmet_port *port);
 	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
 	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
+
+	/* Operations mandatory for PCI target controllers */
+	u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags,
+			 u16 qsize, u64 prp1);
+	u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
+	u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
+			 u16 qsize, u64 prp1, u16 irq_vector);
+	u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid);
+	u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+			   void *feat_data);
+	u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
+			   void *feat_data);
 };
 
 #define NVMET_MAX_INLINE_BIOVEC	8
@@ -520,18 +536,24 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
 
 u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
 void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
 u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
+u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
 u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
+u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req);
 u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
 u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
 u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
+u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);
 
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
+size_t nvmet_req_transfer_len(struct nvmet_req *req);
 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
@@ -542,19 +564,37 @@ void nvmet_execute_set_features(struct nvmet_req *req);
 void nvmet_execute_get_features(struct nvmet_req *req);
 void nvmet_execute_keep_alive(struct nvmet_req *req);
 
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid);
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
 		u16 size);
+u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+		u16 size);
+u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
 		u16 size);
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+		u16 size);
 void nvmet_sq_destroy(struct nvmet_sq *sq);
 int nvmet_sq_init(struct nvmet_sq *sq);
 
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
 
 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
-u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
-		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
-		uuid_t *hostid);
+
+struct nvmet_alloc_ctrl_args {
+	struct nvmet_port	*port;
+	char			*subsysnqn;
+	char			*hostnqn;
+	uuid_t			*hostid;
+	const struct nvmet_fabrics_ops *ops;
+	struct device		*p2p_client;
+	u32			kato;
+	u32			result;
+	u16			error_loc;
+	u16			status;
+};
+
+struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
 		const char *hostnqn, u16 cntlid,
 		struct nvmet_req *req);
@@ -696,6 +736,11 @@ static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
 	return subsys->type != NVME_NQN_NVME;
 }
 
+static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
+{
+	return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
+}
+
 #ifdef CONFIG_NVME_TARGET_PASSTHRU
 void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
 int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -737,6 +782,41 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
 u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
 u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
 
+static inline bool nvmet_cc_en(u32 cc)
+{
+	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
+}
+
 /* Convert a 32-bit number to a 16-bit 0's based number */
 static inline __le16 to0based(u32 a)
 {
@@ -773,7 +853,9 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
 }
 
 #ifdef CONFIG_NVME_TARGET_AUTH
+u32 nvmet_auth_send_data_len(struct nvmet_req *req);
 void nvmet_execute_auth_send(struct nvmet_req *req);
+u32 nvmet_auth_receive_data_len(struct nvmet_req *req);
 void nvmet_execute_auth_receive(struct nvmet_req *req);
 int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
 		bool set_ctrl);
@@ -831,4 +913,26 @@ static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
 {
 	percpu_ref_put(&pc_ref->ref);
 }
+
+/*
+ * Data for the get_feature() and set_feature() operations of PCI target
+ * controllers.
+ */
+struct nvmet_feat_irq_coalesce {
+	u8		thr;
+	u8		time;
+};
+
+struct nvmet_feat_irq_config {
+	u16		iv;
+	bool		cd;
+};
+
+struct nvmet_feat_arbitration {
+	u8		hpw;
+	u8		mpw;
+	u8		lpw;
+	u8		ab;
+};
+
 #endif /* _NVMET_H */
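
[Editor's note on the nvmet_alloc_ctrl() change above: the multi-argument prototype is replaced by an argument structure. The sketch below is hypothetical and not part of this diff; it only illustrates, using the fields visible in the struct definition above, how a fabrics transport's connect path might fill the structure. The function name example_connect and the error handling are assumptions.]

    /*
     * Hypothetical caller, for illustration only; field names come from the
     * nvmet_alloc_ctrl_args definition shown in the hunk above.
     */
    static u16 example_connect(struct nvmet_req *req, struct nvmf_connect_data *d,
    			   u32 kato)
    {
    	struct nvmet_alloc_ctrl_args args = {
    		.port		= req->port,
    		.subsysnqn	= d->subsysnqn,
    		.hostnqn	= d->hostnqn,
    		.hostid		= &d->hostid,
    		.ops		= req->ops,
    		.p2p_client	= req->p2p_client,
    		.kato		= kato,
    	};
    	struct nvmet_ctrl *ctrl;

    	ctrl = nvmet_alloc_ctrl(&args);
    	if (!ctrl)
    		return args.status;	/* status/error_loc report the failure */

    	/* args.result would carry the value to return in the CQE. */
    	return NVME_SC_SUCCESS;
    }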