target: Remove max_sectors device attribute for modern se_task less code

This patch removes the original usage of dev_attr->max_sectors in favor of
dev_attr->hw_max_sectors, which is now enforced by target core from within
transport_generic_cmd_sequencer() for SCF_SCSI_DATA_SG_IO_CDB ops.
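
Concretely, that enforcement boils down to rejecting any data CDB whose
transfer length implies more sectors than the backend's hw_max_sectors.
Below is a minimal user-space sketch of the comparison only; the struct and
function names (dev_limits, data_cdb_fits) are hypothetical stand-ins, not
the in-kernel API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant se_dev_attrib fields. */
struct dev_limits {
        uint32_t block_size;     /* logical block size in bytes */
        uint32_t hw_max_sectors; /* backend hardware limit, in sectors */
};

/*
 * Return 1 if a data CDB with the given transfer length (in bytes)
 * fits within the backend limit, 0 if it would have to be rejected.
 */
static int data_cdb_fits(const struct dev_limits *lim, uint64_t data_length)
{
        uint64_t sectors = (data_length + lim->block_size - 1) / lim->block_size;

        return sectors <= lim->hw_max_sectors;
}

int main(void)
{
        struct dev_limits lim = { .block_size = 512, .hw_max_sectors = 8192 };

        printf("%d\n", data_cdb_fits(&lim, 8192ULL * 512));     /* 1: at the limit */
        printf("%d\n", data_cdb_fits(&lim, 8192ULL * 512 + 1)); /* 0: one byte over */
        return 0;
}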

After the recent se_task removal patches from hch, having this value for
IBLOCK backends set via configfs by userspace from a saved max_sectors
value has turned out to be problematic, so it makes sense to go ahead and
remove this now-legacy attribute altogether.

This patch also keeps se_dev_set_default_attribs() doing
(sectors / block_size) alignment on the value that actually gets used by
target_core_mod, to be safe here, following the same alignment currently
applied to fabric_max_sectors.
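
As a rough illustration of that alignment, the limit is rounded down so
that the corresponding byte count is a whole number of pages. This is a
user-space sketch assuming a 4 KiB PAGE_SIZE, not the kernel's
se_dev_align_max_sectors() verbatim.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL /* assumed for illustration */

/*
 * Round a sector limit down so that (sectors * block_size) is an exact
 * multiple of PAGE_SIZE, mirroring the alignment described above for the
 * value target_core_mod actually uses.
 */
static uint32_t align_max_sectors(uint32_t max_sectors, uint32_t block_size)
{
        uint64_t bytes = (uint64_t)max_sectors * block_size;
        uint64_t aligned_bytes = bytes - (bytes % PAGE_SIZE);

        return (uint32_t)(aligned_bytes / block_size);
}

int main(void)
{
        /* 1023 sectors of 512 bytes is not page aligned -> rounds down to 1016 */
        printf("%u\n", align_max_sectors(1023, 512));
        /* 2048 sectors of 4096 bytes is already page aligned -> stays 2048 */
        printf("%u\n", align_max_sectors(2048, 4096));
        return 0;
}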

Reported-by: Andy Grover <agrover@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

commit 11e764bd5e (parent 2301917044)
Nicholas Bellinger, 2012-05-09 12:42:09 -07:00
5 changed files with 9 additions and 74 deletions


@@ -458,7 +458,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
          * Set MAXIMUM TRANSFER LENGTH
          */
         max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
-                        dev->se_sub_dev->se_dev_attrib.max_sectors);
+                        dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
         put_unaligned_be32(max_sectors, &buf[8]);
         /*
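
For context on the hunk above: MAXIMUM TRANSFER LENGTH occupies bytes 8..11
of the Block Limits VPD page (B0h) as a big-endian 32-bit sector count,
which is what put_unaligned_be32() stores. A small user-space sketch of the
equivalent encoding follows; put_be32() is a local helper written for this
example (not a kernel API), and the limit values are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Store a 32-bit value big-endian at an arbitrary buffer offset. */
static void put_be32(uint32_t val, unsigned char *p)
{
        p[0] = (unsigned char)(val >> 24);
        p[1] = (unsigned char)(val >> 16);
        p[2] = (unsigned char)(val >> 8);
        p[3] = (unsigned char)val;
}

int main(void)
{
        unsigned char buf[64];
        uint32_t fabric_max_sectors = 8192;
        uint32_t hw_max_sectors = 2048;
        /* MAXIMUM TRANSFER LENGTH = min(fabric limit, hardware limit) */
        uint32_t max_sectors = fabric_max_sectors < hw_max_sectors ?
                               fabric_max_sectors : hw_max_sectors;

        memset(buf, 0, sizeof(buf));
        put_be32(max_sectors, &buf[8]); /* bytes 8..11 of the B0h page */

        /* prints: 00 00 08 00 */
        printf("%02x %02x %02x %02x\n", buf[8], buf[9], buf[10], buf[11]);
        return 0;
}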


@@ -683,9 +683,6 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
 DEF_DEV_ATTRIB_RO(hw_max_sectors);
 SE_DEV_ATTR_RO(hw_max_sectors);
-DEF_DEV_ATTRIB(max_sectors);
-SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
 DEF_DEV_ATTRIB(fabric_max_sectors);
 SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
@@ -727,7 +724,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
         &target_core_dev_attrib_hw_block_size.attr,
         &target_core_dev_attrib_block_size.attr,
         &target_core_dev_attrib_hw_max_sectors.attr,
-        &target_core_dev_attrib_max_sectors.attr,
         &target_core_dev_attrib_fabric_max_sectors.attr,
         &target_core_dev_attrib_optimal_sectors.attr,
         &target_core_dev_attrib_hw_queue_depth.attr,


@@ -876,15 +876,12 @@ void se_dev_set_default_attribs(
         dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
         dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
         /*
-         * max_sectors is based on subsystem plugin dependent requirements.
+         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
          */
-        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-        /*
-         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-         */
-        limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+        limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
                         limits->logical_block_size);
-        dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
+        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
         /*
          * Set fabric_max_sectors, which is reported in block limits
          * VPD page (B0h).
@@ -1168,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
         return 0;
 }
-int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
-{
-        int force = 0; /* Force setting for VDEVS */
-        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-                pr_err("dev[%p]: Unable to change SE Device"
-                        " max_sectors while dev_export_obj: %d count exists\n",
-                        dev, atomic_read(&dev->dev_export_obj.obj_access_count));
-                return -EINVAL;
-        }
-        if (!max_sectors) {
-                pr_err("dev[%p]: Illegal ZERO value for"
-                        " max_sectors\n", dev);
-                return -EINVAL;
-        }
-        if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-                pr_err("dev[%p]: Passed max_sectors: %u less than"
-                        " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
-                        DA_STATUS_MAX_SECTORS_MIN);
-                return -EINVAL;
-        }
-        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-                if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
-                        pr_err("dev[%p]: Passed max_sectors: %u"
-                                " greater than TCM/SE_Device max_sectors:"
-                                " %u\n", dev, max_sectors,
-                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-                        return -EINVAL;
-                }
-        } else {
-                if (!force && (max_sectors >
-                                dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
-                        pr_err("dev[%p]: Passed max_sectors: %u"
-                                " greater than TCM/SE_Device max_sectors"
-                                ": %u, use force=1 to override.\n", dev,
-                                max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-                        return -EINVAL;
-                }
-                if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-                        pr_err("dev[%p]: Passed max_sectors: %u"
-                                " greater than DA_STATUS_MAX_SECTORS_MAX:"
-                                " %u\n", dev, max_sectors,
-                                DA_STATUS_MAX_SECTORS_MAX);
-                        return -EINVAL;
-                }
-        }
-        /*
-         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-         */
-        max_sectors = se_dev_align_max_sectors(max_sectors,
-                        dev->se_sub_dev->se_dev_attrib.block_size);
-        dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
-        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-                dev, max_sectors);
-        return 0;
-}
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
         if (atomic_read(&dev->dev_export_obj.obj_access_count)) {


@@ -870,8 +870,9 @@ void transport_dump_dev_state(
         *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
                 atomic_read(&dev->execute_tasks), dev->queue_depth);
-        *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
-                dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
+        *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
+                dev->se_sub_dev->se_dev_attrib.block_size,
+                dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
         *bl += sprintf(b + *bl, " ");
 }
@@ -3498,7 +3499,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
                 BUG_ON(cmd->data_length % attr->block_size);
                 BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
-                        attr->max_sectors);
+                        attr->hw_max_sectors);
         }
         atomic_inc(&cmd->t_fe_count);


@@ -708,7 +708,6 @@ struct se_dev_attrib {
         u32 hw_block_size;
         u32 block_size;
         u32 hw_max_sectors;
-        u32 max_sectors;
         u32 fabric_max_sectors;
         u32 optimal_sectors;
         u32 hw_queue_depth;