mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
nvmet: support reservation feature
This patch implements the reservation feature, including:
1. reservation register(register, unregister and replace).
2. reservation acquire(acquire, preempt, preempt and abort).
3. reservation release(release and clear).
4. reservation report.
5. set feature and get feature of reservation notify mask.
6. get log page of reservation event.
Not supported:
1. persistent reservation through power loss.
Test cases:
Use nvme-cli and fio to test all implemented sub features:
1. use nvme resv-register to register the host as a registrant,
   to unregister it, or to replace its key with a new one.
2. use nvme resv-acquire to make the host the reservation holder,
   and use fio to send read and write I/O under every reservation
   type. Also test preempt and "preempt and abort".
3. use nvme resv-report to show all registrants and reservation
status.
4. use nvme resv-release to release all registrants.
5. use nvme get-log to get events generated by the preceding
operations.
In addition, make the reservation feature configurable: a namespace
can be set to support reservations before it is enabled. resv_enable
defaults to false.
Signed-off-by: Guixin Liu <kanie@linux.alibaba.com>
Reviewed-by: Dmitry Bogdanov <d.bogdanov@yadro.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
This commit is contained in:
@@ -20,6 +20,7 @@
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/radix-tree.h>
|
||||
#include <linux/t10-pi.h>
|
||||
#include <linux/kfifo.h>
|
||||
|
||||
#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
|
||||
|
||||
@@ -30,6 +31,7 @@
|
||||
#define NVMET_MN_MAX_SIZE 40
|
||||
#define NVMET_SN_MAX_SIZE 20
|
||||
#define NVMET_FR_MAX_SIZE 8
|
||||
#define NVMET_PR_LOG_QUEUE_SIZE 64
|
||||
|
||||
/*
|
||||
* Supported optional AENs:
|
||||
@@ -56,6 +58,38 @@
|
||||
#define IPO_IATTR_CONNECT_SQE(x) \
|
||||
(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
|
||||
|
||||
struct nvmet_pr_registrant {
|
||||
u64 rkey;
|
||||
uuid_t hostid;
|
||||
enum nvme_pr_type rtype;
|
||||
struct list_head entry;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
struct nvmet_pr {
|
||||
bool enable;
|
||||
unsigned long notify_mask;
|
||||
atomic_t generation;
|
||||
struct nvmet_pr_registrant __rcu *holder;
|
||||
/*
|
||||
* During the execution of the reservation command, mutual
|
||||
* exclusion is required throughout the process. However,
|
||||
* while waiting asynchronously for the 'per controller
|
||||
* percpu_ref' to complete before the 'preempt and abort'
|
||||
* command finishes, a semaphore is needed to ensure mutual
|
||||
* exclusion instead of a mutex.
|
||||
*/
|
||||
struct semaphore pr_sem;
|
||||
struct list_head registrant_list;
|
||||
};
|
||||
|
||||
struct nvmet_pr_per_ctrl_ref {
|
||||
struct percpu_ref ref;
|
||||
struct completion free_done;
|
||||
struct completion confirm_done;
|
||||
uuid_t hostid;
|
||||
};
|
||||
|
||||
struct nvmet_ns {
|
||||
struct percpu_ref ref;
|
||||
struct file *bdev_file;
|
||||
@@ -85,6 +119,8 @@ struct nvmet_ns {
|
||||
int pi_type;
|
||||
int metadata_size;
|
||||
u8 csi;
|
||||
struct nvmet_pr pr;
|
||||
struct xarray pr_per_ctrl_refs;
|
||||
};
|
||||
|
||||
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
|
||||
@@ -191,6 +227,13 @@ static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
|
||||
return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
|
||||
}
|
||||
|
||||
struct nvmet_pr_log_mgr {
|
||||
struct mutex lock;
|
||||
u64 lost_count;
|
||||
u64 counter;
|
||||
DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
|
||||
};
|
||||
|
||||
struct nvmet_ctrl {
|
||||
struct nvmet_subsys *subsys;
|
||||
struct nvmet_sq **sqs;
|
||||
@@ -246,6 +289,7 @@ struct nvmet_ctrl {
|
||||
u8 *dh_key;
|
||||
size_t dh_keysize;
|
||||
#endif
|
||||
struct nvmet_pr_log_mgr pr_log_mgr;
|
||||
};
|
||||
|
||||
struct nvmet_subsys {
|
||||
@@ -396,6 +440,9 @@ struct nvmet_req {
|
||||
struct work_struct zmgmt_work;
|
||||
} z;
|
||||
#endif /* CONFIG_BLK_DEV_ZONED */
|
||||
struct {
|
||||
struct work_struct abort_work;
|
||||
} r;
|
||||
};
|
||||
int sg_cnt;
|
||||
int metadata_sg_cnt;
|
||||
@@ -412,6 +459,7 @@ struct nvmet_req {
|
||||
struct device *p2p_client;
|
||||
u16 error_loc;
|
||||
u64 error_slba;
|
||||
struct nvmet_pr_per_ctrl_ref *pc_ref;
|
||||
};
|
||||
|
||||
#define NVMET_MAX_MPOOL_BVEC 16
|
||||
@@ -498,7 +546,8 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
|
||||
|
||||
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
|
||||
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
||||
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
|
||||
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
|
||||
uuid_t *hostid);
|
||||
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
|
||||
const char *hostnqn, u16 cntlid,
|
||||
struct nvmet_req *req);
|
||||
@@ -761,4 +810,18 @@ static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
|
||||
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
|
||||
#endif
|
||||
|
||||
int nvmet_pr_init_ns(struct nvmet_ns *ns);
|
||||
u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
|
||||
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
|
||||
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
|
||||
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
|
||||
void nvmet_pr_exit_ns(struct nvmet_ns *ns);
|
||||
void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
|
||||
u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
|
||||
u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
|
||||
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
|
||||
static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
|
||||
{
|
||||
percpu_ref_put(&pc_ref->ref);
|
||||
}
|
||||
#endif /* _NVMET_H */
|
||||
|
||||
Reference in New Issue
Block a user