Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "Usual driver updates (ufs, lpfc, target, qla2xxx) plus assorted
  cleanups and fixes including the WQ_PERCPU series.

  The biggest core change is the new allocation of pseudo-devices which
  allow the sending of internal commands to a given SCSI target"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (147 commits)
  scsi: MAINTAINERS: Add the UFS include directory
  scsi: scsi_debug: Support injecting unaligned write errors
  scsi: qla2xxx: Fix improper freeing of purex item
  scsi: ufs: rockchip: Fix compile error without CONFIG_GPIOLIB
  scsi: ufs: rockchip: Reset controller on PRE_CHANGE of hce enable notify
  scsi: ufs: core: Use scsi_device_busy()
  scsi: ufs: core: Fix single doorbell mode support
  scsi: pm80xx: Add WQ_PERCPU to alloc_workqueue() users
  scsi: target: Add WQ_PERCPU to alloc_workqueue() users
  scsi: qedi: Add WQ_PERCPU to alloc_workqueue() users
  scsi: target: ibmvscsi: Add WQ_PERCPU to alloc_workqueue() users
  scsi: qedf: Add WQ_PERCPU to alloc_workqueue() users
  scsi: bnx2fc: Add WQ_PERCPU to alloc_workqueue() users
  scsi: be2iscsi: Add WQ_PERCPU to alloc_workqueue() users
  scsi: message: fusion: Add WQ_PERCPU to alloc_workqueue() users
  scsi: lpfc: WQ_PERCPU added to alloc_workqueue() users
  scsi: scsi_transport_fc: WQ_PERCPU added to alloc_workqueue users()
  scsi: scsi_dh_alua: WQ_PERCPU added to alloc_workqueue() users
  scsi: qla2xxx: WQ_PERCPU added to alloc_workqueue() users
  scsi: target: sbp: Replace use of system_unbound_wq with system_dfl_wq
  ...
This commit is contained in:
Linus Torvalds
2025-12-05 19:56:50 -08:00
115 changed files with 4921 additions and 1442 deletions

View File

@@ -730,7 +730,7 @@ static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
agent->orb_pointer);
queue_work(system_unbound_wq, &agent->work);
queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -764,7 +764,7 @@ static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent DOORBELL\n");
queue_work(system_unbound_wq, &agent->work);
queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -990,7 +990,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
if (tgt_agent_check_active(agent) && !doorbell) {
INIT_WORK(&req->work, tgt_agent_process_work);
queue_work(system_unbound_wq, &req->work);
queue_work(system_dfl_wq, &req->work);
} else {
/* don't process this request, just check next_ORB */
sbp_free_request(req);
@@ -1618,7 +1618,7 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
agent->orb_offset = sbp2_pointer_to_addr(ptr);
agent->request = req;
queue_work(system_unbound_wq, &agent->work);
queue_work(system_dfl_wq, &agent->work);
rcode = RCODE_COMPLETE;
} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
addr_to_sbp2_pointer(agent->orb_offset, ptr);

View File

@@ -578,6 +578,11 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary);
DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
static ssize_t _name##_store(struct config_item *item, const char *page,\
@@ -1300,6 +1305,11 @@ CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);
CONFIGFS_ATTR(, submit_type);
CONFIGFS_ATTR_RO(, atomic_max_len);
CONFIGFS_ATTR_RO(, atomic_alignment);
CONFIGFS_ATTR_RO(, atomic_granularity);
CONFIGFS_ATTR_RO(, atomic_max_with_boundary);
CONFIGFS_ATTR_RO(, atomic_max_boundary);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1343,6 +1353,11 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
&attr_pgr_support,
&attr_emulate_rsoc,
&attr_submit_type,
&attr_atomic_alignment,
&attr_atomic_max_len,
&attr_atomic_granularity,
&attr_atomic_max_with_boundary,
&attr_atomic_max_boundary,
NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -2758,33 +2773,24 @@ static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
struct se_device *dev;
struct se_hba *hba;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[LU_GROUP_NAME_BUF] = { };
const char *const end = page + PAGE_SIZE;
char *cur = page;
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
hba = dev->se_hba;
struct se_device *dev = lu_gp_mem->lu_gp_mem_dev;
struct se_hba *hba = dev->se_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
cur += scnprintf(cur, end - cur, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE || cur_len > LU_GROUP_NAME_BUF) {
pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
if (WARN_ON_ONCE(cur >= end))
break;
}
memcpy(page+len, buf, cur_len);
len += cur_len;
}
spin_unlock(&lu_gp->lu_gp_lock);
return len;
return cur - page;
}
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);

View File

@@ -814,6 +814,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
/* Skip allocating lun_stats since we can't export them. */
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_shutdown_comp);
@@ -840,12 +841,29 @@ free_device:
return NULL;
}
/*
 * Derive the target's SCSI atomic-write attributes from the backing block
 * device's atomic write limits.  All attributes are stored in units of the
 * device's logical block size.  If the bdev cannot do atomic writes the
 * attributes are left untouched (atomic_max_len == 0 means "unsupported").
 */
void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
		struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_can_atomic_write(bdev))
		return;

	/* Convert the queue's byte-based limits into logical-block counts. */
	attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
	attrib->atomic_granularity = attrib->atomic_alignment =
		queue_atomic_write_unit_min_bytes(q) / block_size;
	/* Boundary-based atomic writes are not supported by this backend. */
	attrib->atomic_max_with_boundary = 0;
	attrib->atomic_max_boundary = 0;
}
EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
/*
* Check if the underlying struct block_device supports discard and if yes
* configure the UNMAP parameters.
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct block_device *bdev)
bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
struct block_device *bdev)
{
int block_size = bdev_logical_block_size(bdev);
@@ -863,7 +881,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
bdev_discard_alignment(bdev) / block_size;
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
EXPORT_SYMBOL(target_configure_unmap_from_bdev);
/*
* Convert from blocksize advertised to the initiator to the 512 byte

View File

@@ -697,7 +697,7 @@ static void target_fabric_port_release(struct config_item *item)
struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group);
kfree_rcu(lun, rcu_head);
call_rcu(&lun->rcu_head, target_tpg_free_lun);
}
static struct configfs_item_operations target_fabric_port_item_ops = {

View File

@@ -92,8 +92,8 @@ static bool fd_configure_unmap(struct se_device *dev)
struct inode *inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode))
return target_configure_unmap_from_queue(&dev->dev_attrib,
I_BDEV(inode));
return target_configure_unmap_from_bdev(&dev->dev_attrib,
I_BDEV(inode));
/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
dev->dev_attrib.max_unmap_lba_count = 0x2000;

View File

@@ -84,8 +84,8 @@ static bool iblock_configure_unmap(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
return target_configure_unmap_from_queue(&dev->dev_attrib,
ib_dev->ibd_bd);
return target_configure_unmap_from_bdev(&dev->dev_attrib,
ib_dev->ibd_bd);
}
static int iblock_configure_device(struct se_device *dev)
@@ -152,6 +152,8 @@ static int iblock_configure_device(struct se_device *dev)
if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
target_configure_write_atomic_from_bdev(&dev->dev_attrib, bd);
bi = bdev_get_integrity(bd);
if (!bi)
return 0;
@@ -773,6 +775,9 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
else if (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA;
}
if (cmd->se_cmd_flags & SCF_ATOMIC)
opf |= REQ_ATOMIC;
} else {
opf = REQ_OP_READ;
miter_dir = SG_MITER_FROM_SG;

View File

@@ -125,6 +125,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
struct se_lun *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
void target_tpg_free_lun(struct rcu_head *head);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
bool, struct se_device *);
void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);

View File

@@ -764,6 +764,49 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
return 0;
}
/*
 * Validate a WRITE ATOMIC (16) CDB against the device's advertised atomic
 * write limits.  Returns 0 when the request is acceptable, otherwise an
 * appropriate sense_reason_t for the initiator.
 */
static sense_reason_t
sbc_check_atomic(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_dev_attrib *attrib = &dev->dev_attrib;
	u16 boundary, transfer_len;
	u64 lba;

	/* ATOMIC BOUNDARY (bytes 10-11) and TRANSFER LENGTH (bytes 12-13) */
	lba = transport_lba_64(cdb);
	boundary = get_unaligned_be16(&cdb[10]);
	transfer_len = get_unaligned_be16(&cdb[12]);

	/* atomic_max_len == 0 => atomic writes not configured on this device */
	if (!attrib->atomic_max_len)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (boundary) {
		/* Boundary requests are capped by the with-boundary limits. */
		if (transfer_len > attrib->atomic_max_with_boundary)
			return TCM_INVALID_CDB_FIELD;

		if (boundary > attrib->atomic_max_boundary)
			return TCM_INVALID_CDB_FIELD;
	} else {
		if (transfer_len > attrib->atomic_max_len)
			return TCM_INVALID_CDB_FIELD;
	}

	if (attrib->atomic_granularity) {
		/* Length and (non-zero) boundary must be granularity multiples. */
		if (transfer_len % attrib->atomic_granularity)
			return TCM_INVALID_CDB_FIELD;

		if (boundary && boundary % attrib->atomic_granularity)
			return TCM_INVALID_CDB_FIELD;
	}

	if (dev->dev_attrib.atomic_alignment) {
		u64 _lba = lba;

		/*
		 * do_div() modifies its dividend in place and returns the
		 * remainder; operate on a copy so lba stays intact.  A
		 * non-zero remainder means the starting LBA is misaligned.
		 */
		if (do_div(_lba, dev->dev_attrib.atomic_alignment))
			return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
@@ -861,6 +904,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
break;
case WRITE_16:
case WRITE_VERIFY_16:
case WRITE_ATOMIC_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
@@ -872,6 +916,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
return ret;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
if (cdb[0] == WRITE_ATOMIC_16) {
cmd->se_cmd_flags |= SCF_ATOMIC;
ret = sbc_check_atomic(dev, cmd, cdb);
if (ret)
return ret;
}
cmd->execute_cmd = sbc_execute_rw;
break;
case VARIABLE_LENGTH_CMD:

View File

@@ -521,7 +521,6 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
@@ -562,11 +561,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
else
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP.
*/
put_unaligned_be16(12, &buf[2]);
if (!have_tp)
goto max_write_same;
goto try_atomic;
/*
* Set MAXIMUM UNMAP LBA COUNT
@@ -595,9 +593,29 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* MAXIMUM WRITE SAME LENGTH
*/
max_write_same:
put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
put_unaligned_be16(40, &buf[2]);
try_atomic:
/*
* ATOMIC
*/
if (!dev->dev_attrib.atomic_max_len)
goto done;
if (dev->dev_attrib.atomic_max_len < io_max_blocks)
put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]);
else
put_unaligned_be32(io_max_blocks, &buf[44]);
put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]);
put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]);
put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]);
put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]);
put_unaligned_be16(60, &buf[2]);
done:
return 0;
}
@@ -1452,6 +1470,24 @@ static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
/*
 * Report WRITE ATOMIC (16) as supported only when the device's atomic
 * limits have been configured (atomic_max_len != 0).
 */
static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr,
				  struct se_cmd *cmd)
{
	return cmd->se_dev->dev_attrib.atomic_max_len;
}
/*
 * REPORT SUPPORTED OPERATION CODES descriptor for WRITE ATOMIC (16).
 * NOTE(review): deliberately not const — the update_usage_bits callback
 * presumably rewrites usage_bits at report time; confirm against the
 * other non-const descriptors before "fixing" this.
 */
static struct target_opcode_descriptor tcm_opcode_write_atomic16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_ATOMIC_16,
	.cdb_size = 16,
	/* Byte 1 mask 0xf8: upper five bits used (DPO/FUA handled via callback). */
	.usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_atomic_enabled,
	.update_usage_bits = set_dpofua_usage_bits,
};
static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
@@ -2008,6 +2044,7 @@ static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_write16,
&tcm_opcode_write_verify16,
&tcm_opcode_write_same32,
&tcm_opcode_write_atomic16,
&tcm_opcode_compare_write,
&tcm_opcode_read_capacity,
&tcm_opcode_read_capacity16,

View File

@@ -276,56 +276,39 @@ static ssize_t target_stat_lu_state_bit_show(struct config_item *item,
return snprintf(page, PAGE_SIZE, "exposed\n");
}
static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
struct se_dev_io_stats *stats;
unsigned int cpu;
u32 cmds = 0;
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(dev->stats, cpu);
cmds += stats->total_cmds;
}
/* scsiLuNumCommands */
return snprintf(page, PAGE_SIZE, "%u\n", cmds);
/*
 * Generate a helper that sums one field of a per-CPU stats structure across
 * all possible CPUs and prints the total, right-shifted by @shift (e.g.
 * shift == 20 to report megabytes from a byte counter, 0 for raw counts).
 */
#define per_cpu_stat_snprintf(stats_struct, prefix, field, shift) \
static ssize_t \
per_cpu_stat_##prefix##_snprintf(struct stats_struct __percpu *per_cpu_stats, \
char *page) \
{ \
struct stats_struct *stats; \
unsigned int cpu; \
u64 sum = 0; \
\
for_each_possible_cpu(cpu) { \
stats = per_cpu_ptr(per_cpu_stats, cpu); \
sum += stats->field; \
} \
\
return snprintf(page, PAGE_SIZE, "%llu\n", sum >> shift); \
}
static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
struct se_dev_io_stats *stats;
unsigned int cpu;
u32 bytes = 0;
#define lu_show_per_cpu_stat(prefix, field, shift) \
per_cpu_stat_snprintf(se_dev_io_stats, prefix, field, shift); \
static ssize_t \
target_stat_##prefix##_show(struct config_item *item, char *page) \
{ \
struct se_device *dev = to_stat_lu_dev(item); \
\
return per_cpu_stat_##prefix##_snprintf(dev->stats, page); \
} \
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(dev->stats, cpu);
bytes += stats->read_bytes;
}
/* scsiLuReadMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}
static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
struct se_dev_io_stats *stats;
unsigned int cpu;
u32 bytes = 0;
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(dev->stats, cpu);
bytes += stats->write_bytes;
}
/* scsiLuWrittenMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}
/* scsiLuNumCommands */
lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0);
/* scsiLuReadMegaBytes */
lu_show_per_cpu_stat(lu_read_mbytes, read_bytes, 20);
/* scsiLuWrittenMegaBytes */
lu_show_per_cpu_stat(lu_write_mbytes, write_bytes, 20);
static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
{
@@ -623,53 +606,30 @@ static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item,
return ret;
}
static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&lun->lun_stats.cmd_pdus));
rcu_read_unlock();
return ret;
/*
 * Generate a tgt_port configfs show handler for one per-CPU lun_stats
 * field: instantiates the per-CPU summing helper, then wraps it with an
 * RCU-protected dereference of lun->lun_se_dev, returning -ENODEV when
 * the lun is no longer backed by a device.
 */
#define tgt_port_show_per_cpu_stat(prefix, field, shift) \
per_cpu_stat_snprintf(scsi_port_stats, prefix, field, shift); \
static ssize_t \
target_stat_##prefix##_show(struct config_item *item, char *page) \
{ \
struct se_lun *lun = to_stat_tgt_port(item); \
struct se_device *dev; \
int ret; \
\
rcu_read_lock(); \
dev = rcu_dereference(lun->lun_se_dev); \
if (!dev) { \
rcu_read_unlock(); \
return -ENODEV; \
} \
\
ret = per_cpu_stat_##prefix##_snprintf(lun->lun_stats, page); \
rcu_read_unlock(); \
return ret; \
}
static ssize_t target_stat_tgt_port_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(atomic_long_read(&lun->lun_stats.rx_data_octets) >> 20));
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_tgt_port_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun *lun = to_stat_tgt_port(item);
struct se_device *dev;
ssize_t ret = -ENODEV;
rcu_read_lock();
dev = rcu_dereference(lun->lun_se_dev);
if (dev)
ret = snprintf(page, PAGE_SIZE, "%u\n",
(u32)(atomic_long_read(&lun->lun_stats.tx_data_octets) >> 20));
rcu_read_unlock();
return ret;
}
tgt_port_show_per_cpu_stat(tgt_port_in_cmds, cmd_pdus, 0);
tgt_port_show_per_cpu_stat(tgt_port_write_mbytes, rx_data_octets, 20);
tgt_port_show_per_cpu_stat(tgt_port_read_mbytes, tx_data_octets, 20);
static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item,
char *page)
@@ -1035,92 +995,34 @@ static ssize_t target_stat_auth_att_count_show(struct config_item *item,
return ret;
}
static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
unsigned int cpu;
ssize_t ret;
u32 cmds = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(deve->stats, cpu);
cmds += stats->total_cmds;
}
/* scsiAuthIntrOutCommands */
ret = snprintf(page, PAGE_SIZE, "%u\n", cmds);
rcu_read_unlock();
return ret;
/*
 * Generate an auth-group configfs show handler for one per-CPU deve stats
 * field: instantiates the per-CPU summing helper, then wraps it with an
 * RCU-protected lookup of the mapped-LUN's se_dev_entry, returning -ENODEV
 * when the mapping no longer exists.
 */
#define auth_show_per_cpu_stat(prefix, field, shift) \
per_cpu_stat_snprintf(se_dev_entry_io_stats, prefix, field, shift); \
static ssize_t \
target_stat_##prefix##_show(struct config_item *item, char *page) \
{ \
struct se_lun_acl *lacl = auth_to_lacl(item); \
struct se_node_acl *nacl = lacl->se_lun_nacl; \
struct se_dev_entry *deve; \
int ret; \
\
rcu_read_lock(); \
deve = target_nacl_find_deve(nacl, lacl->mapped_lun); \
if (!deve) { \
rcu_read_unlock(); \
return -ENODEV; \
} \
\
ret = per_cpu_stat_##prefix##_snprintf(deve->stats, page); \
rcu_read_unlock(); \
return ret; \
}
static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
unsigned int cpu;
ssize_t ret;
u32 bytes = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(deve->stats, cpu);
bytes += stats->read_bytes;
}
/* scsiAuthIntrReadMegaBytes */
ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
rcu_read_unlock();
return ret;
}
static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
unsigned int cpu;
ssize_t ret;
u32 bytes = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (!deve) {
rcu_read_unlock();
return -ENODEV;
}
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(deve->stats, cpu);
bytes += stats->write_bytes;
}
/* scsiAuthIntrWrittenMegaBytes */
ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
rcu_read_unlock();
return ret;
}
/* scsiAuthIntrOutCommands */
auth_show_per_cpu_stat(auth_num_cmds, total_cmds, 0);
/* scsiAuthIntrReadMegaBytes */
auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20);
/* scsiAuthIntrWrittenMegaBytes */
auth_show_per_cpu_stat(auth_write_mbytes, write_bytes, 20);
static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item,
char *page)

View File

@@ -548,7 +548,7 @@ int core_tpg_register(
ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
true, g_lun0_dev);
if (ret < 0) {
kfree(se_tpg->tpg_virt_lun0);
target_tpg_free_lun(&se_tpg->tpg_virt_lun0->rcu_head);
return ret;
}
}
@@ -595,7 +595,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
if (se_tpg->proto_id >= 0) {
core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
call_rcu(&se_tpg->tpg_virt_lun0->rcu_head, target_tpg_free_lun);
}
target_tpg_deregister_rtpi(se_tpg);
@@ -615,6 +615,13 @@ struct se_lun *core_tpg_alloc_lun(
pr_err("Unable to allocate se_lun memory\n");
return ERR_PTR(-ENOMEM);
}
lun->lun_stats = alloc_percpu(struct scsi_port_stats);
if (!lun->lun_stats) {
pr_err("Unable to allocate se_lun stats memory\n");
goto free_lun;
}
lun->unpacked_lun = unpacked_lun;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
@@ -628,6 +635,18 @@ struct se_lun *core_tpg_alloc_lun(
lun->lun_tpg = tpg;
return lun;
free_lun:
kfree(lun);
return ERR_PTR(-ENOMEM);
}
/*
 * Release a struct se_lun: free its per-CPU stats, then the lun itself.
 * Used as a call_rcu() callback (via rcu_head) and also invoked directly
 * on setup-error paths where no RCU grace period is required.
 */
void target_tpg_free_lun(struct rcu_head *head)
{
	struct se_lun *lun = container_of(head, struct se_lun, rcu_head);

	free_percpu(lun->lun_stats);
	kfree(lun);
}
int core_tpg_add_lun(

View File

@@ -126,12 +126,12 @@ int init_se_kmem_caches(void)
}
target_completion_wq = alloc_workqueue("target_completion",
WQ_MEM_RECLAIM, 0);
WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_completion_wq)
goto out_free_lba_map_mem_cache;
target_submission_wq = alloc_workqueue("target_submission",
WQ_MEM_RECLAIM, 0);
WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_submission_wq)
goto out_free_completion_wq;
@@ -1571,7 +1571,12 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
return ret;
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
/*
* If this is the xcopy_lun then we won't have lun_stats since we
* can't export them.
*/
if (cmd->se_lun->lun_stats)
this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus);
return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);
@@ -2597,8 +2602,9 @@ queue_rsp:
!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.tx_data_octets);
if (cmd->se_lun->lun_stats)
this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
cmd->data_length);
/*
* Perform READ_STRIP of PI using software emulation when
* backend had PI enabled, if the transport will not be
@@ -2621,14 +2627,16 @@ queue_rsp:
goto queue_full;
break;
case DMA_TO_DEVICE:
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.rx_data_octets);
if (cmd->se_lun->lun_stats)
this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets,
cmd->data_length);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
if (cmd->se_cmd_flags & SCF_BIDI) {
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.tx_data_octets);
if (cmd->se_lun->lun_stats)
this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
cmd->data_length);
ret = cmd->se_tfo->queue_data_in(cmd);
if (ret)
goto queue_full;

View File

@@ -462,7 +462,7 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
int target_xcopy_setup_pt(void)
{
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!xcopy_wq) {
pr_err("Unable to allocate xcopy_wq\n");
return -ENOMEM;

View File

@@ -250,7 +250,7 @@ static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
wq = alloc_workqueue("tcm_fc", 0, 1);
wq = alloc_workqueue("tcm_fc", WQ_PERCPU, 1);
if (!wq) {
kfree(tpg);
return NULL;