diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 22cf44cf43a..9888693a18f 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -458,7 +458,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
-			dev->se_sub_dev->se_dev_attrib.max_sectors);
+			dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
	put_unaligned_be32(max_sectors, &buf[8]);

	/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index cbb66537d23..931521cc4e4 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -683,9 +683,6 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
 DEF_DEV_ATTRIB_RO(hw_max_sectors);
 SE_DEV_ATTR_RO(hw_max_sectors);

-DEF_DEV_ATTRIB(max_sectors);
-SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
-
 DEF_DEV_ATTRIB(fabric_max_sectors);
 SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);

@@ -727,7 +724,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
	&target_core_dev_attrib_hw_block_size.attr,
	&target_core_dev_attrib_block_size.attr,
	&target_core_dev_attrib_hw_max_sectors.attr,
-	&target_core_dev_attrib_max_sectors.attr,
	&target_core_dev_attrib_fabric_max_sectors.attr,
	&target_core_dev_attrib_optimal_sectors.attr,
	&target_core_dev_attrib_hw_queue_depth.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e621350feeb..5ad972856a8 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -876,15 +876,12 @@ void se_dev_set_default_attribs(
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
-	 * max_sectors is based on subsystem plugin dependent requirements.
+	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
-	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-	/*
-	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-	 */
-	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+	limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
		limits->logical_block_size);
-	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
+	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+
	/*
	 * Set fabric_max_sectors, which is reported in block limits
	 * VPD page (B0h).
@@ -1168,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
	return 0;
 }

-int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
-{
-	int force = 0; /* Force setting for VDEVS */
-
-	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		pr_err("dev[%p]: Unable to change SE Device"
-			" max_sectors while dev_export_obj: %d count exists\n",
-			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
-		return -EINVAL;
-	}
-	if (!max_sectors) {
-		pr_err("dev[%p]: Illegal ZERO value for"
-			" max_sectors\n", dev);
-		return -EINVAL;
-	}
-	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-		pr_err("dev[%p]: Passed max_sectors: %u less than"
-			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
-			DA_STATUS_MAX_SECTORS_MIN);
-		return -EINVAL;
-	}
-	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than TCM/SE_Device max_sectors:"
-				" %u\n", dev, max_sectors,
-				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-			return -EINVAL;
-		}
-	} else {
-		if (!force && (max_sectors >
-				dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than TCM/SE_Device max_sectors"
-				": %u, use force=1 to override.\n", dev,
-				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-			return -EINVAL;
-		}
-		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than DA_STATUS_MAX_SECTORS_MAX:"
-				" %u\n", dev, max_sectors,
-				DA_STATUS_MAX_SECTORS_MAX);
-			return -EINVAL;
-		}
-	}
-	/*
-	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-	 */
-	max_sectors = se_dev_align_max_sectors(max_sectors,
-		dev->se_sub_dev->se_dev_attrib.block_size);
-
-	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
-	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-		dev, max_sectors);
-	return 0;
-}
-
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 85f14e0fe5d..59577568f93 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -870,8 +870,9 @@ void transport_dump_dev_state(

	*bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
-	*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
-		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
+	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
+		dev->se_sub_dev->se_dev_attrib.block_size,
+		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
 }

@@ -3498,7 +3499,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)

		BUG_ON(cmd->data_length % attr->block_size);
		BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
-			attr->max_sectors);
+			attr->hw_max_sectors);
	}

	atomic_inc(&cmd->t_fe_count);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 927771aff1f..1fe9111f4dc 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -708,7 +708,6 @@ struct se_dev_attrib {
	u32 hw_block_size;
	u32 block_size;
	u32 hw_max_sectors;
-	u32 max_sectors;
	u32 fabric_max_sectors;
	u32 optimal_sectors;
	u32 hw_queue_depth;
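
Note on the se_dev_align_max_sectors() call in the target_core_device.c hunk above: with the user-settable max_sectors attribute gone, the patch aligns limits->max_hw_sectors itself down to PAGE_SIZE-sized I/O before storing it in se_dev_attrib.hw_max_sectors. The standalone sketch below only illustrates that alignment arithmetic; the helper name, the hard-coded 4 KiB page size, and the example values are assumptions for illustration, not the kernel's implementation.

/*
 * Illustrative sketch only: the kind of PAGE_SIZE alignment the
 * se_dev_align_max_sectors() call above is used for.  The helper name
 * and the 4 KiB page size are assumptions, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u	/* assumed 4 KiB pages */

static uint32_t example_align_max_sectors(uint32_t max_sectors,
					  uint32_t block_size)
{
	/* Size in bytes of the largest allowed transfer. */
	uint64_t bytes = (uint64_t)max_sectors * block_size;

	/* Round the byte count down to a PAGE_SIZE boundary ... */
	bytes &= ~((uint64_t)EXAMPLE_PAGE_SIZE - 1);

	/* ... and convert back into sectors of block_size bytes. */
	return (uint32_t)(bytes / block_size);
}

int main(void)
{
	/* 2049 x 512-byte sectors is not PAGE_SIZE aligned; prints 2048 (1 MiB). */
	printf("%u\n", example_align_max_sectors(2049, 512));
	return 0;
}

Because hw_max_sectors is aligned once at device setup, the BUG_ON() in transport_generic_new_cmd() and the MAXIMUM TRANSFER LENGTH reported in the B0h VPD page can both rely on the same attribute without a separate soft max_sectors limit.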