
target: add a parse_cdb method to the backend drivers

Instead of trying to handle all SCSI command sets in one function
(transport_generic_cmd_sequencer), call out to the backend driver to perform
this functionality.  For pSCSI a copy of the existing code is used, but all
the virtual backends can use the new sbc_parse_cdb helper to provide a
simple SBC emulation.

For now this setup means a fair amount of duplication between pSCSI and the
SBC library, but patches later in this series will sort out that problem.

(nab: Fix up build failure in target_core_pscsi.c)

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Christoph Hellwig 2012-05-20 11:59:14 -04:00 committed by Nicholas Bellinger
parent 88455ec4be
commit d6e0175cf3
10 changed files with 936 additions and 733 deletions
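The heart of the change is the new hook in include/target/target_core_backend.h at the bottom of this diff: struct se_subsystem_api grows a parse_cdb function pointer, and target_setup_cmd_from_cdb() dispatches through it instead of running the monolithic sequencer. A minimal user-space sketch of that dispatch pattern (the struct layouts and the size values are simplified stand-ins, not the real kernel definitions):

	#include <stdio.h>

	struct se_cmd {
		unsigned char t_task_cdb[16];
	};

	struct se_subsystem_api {
		const char *name;
		/* the hook this commit introduces */
		int (*parse_cdb)(struct se_cmd *cmd, unsigned int *size);
	};

	/* shared SBC emulation, used by all virtual backends */
	static int sbc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
	{
		*size = 512;		/* pretend: one 512-byte block */
		return 0;
	}

	/* pSCSI keeps a private copy of the old sequencer logic for now */
	static int pscsi_parse_cdb(struct se_cmd *cmd, unsigned int *size)
	{
		*size = 512;
		return 0;
	}

	static struct se_subsystem_api iblock_template = {
		.name = "iblock", .parse_cdb = sbc_parse_cdb,
	};
	static struct se_subsystem_api pscsi_template = {
		.name = "pscsi", .parse_cdb = pscsi_parse_cdb,
	};

	int main(void)
	{
		struct se_cmd cmd = { .t_task_cdb = { 0x28 } };	/* READ_10 */
		struct se_subsystem_api *backends[] = { &iblock_template, &pscsi_template };
		unsigned int size;

		/* what target_setup_cmd_from_cdb() now does per device */
		for (int i = 0; i < 2; i++)
			if (backends[i]->parse_cdb(&cmd, &size) == 0)
				printf("%s: size=%u\n", backends[i]->name, size);
		return 0;
	}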

drivers/target/Makefile

@@ -10,6 +10,7 @@ target_core_mod-y := target_core_configfs.o \
target_core_tpg.o \
target_core_transport.o \
target_core_cdb.o \
target_core_sbc.o \
target_core_spc.o \
target_core_ua.o \
target_core_rd.o \

drivers/target/target_core_file.c

@@ -561,6 +561,7 @@ static struct se_subsystem_api fileio_template = {
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.free_device = fd_free_device,
.parse_cdb = sbc_parse_cdb,
.execute_cmd = fd_execute_cmd,
.do_sync_cache = fd_emulate_sync_cache,
.check_configfs_dev_params = fd_check_configfs_dev_params,

drivers/target/target_core_iblock.c

@@ -653,6 +653,7 @@ static struct se_subsystem_api iblock_template = {
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
.parse_cdb = sbc_parse_cdb,
.execute_cmd = iblock_execute_cmd,
.do_discard = iblock_do_discard,
.do_sync_cache = iblock_emulate_sync_cache,

drivers/target/target_core_internal.h

@@ -96,9 +96,6 @@ int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
/* target_core_spc.c */
int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough);
/* target_core_transport.c */
extern struct kmem_cache *se_tmr_req_cache;

drivers/target/target_core_pscsi.c

@@ -35,8 +35,10 @@
#include <linux/spinlock.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/file.h>
#include <linux/ratelimit.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -46,6 +48,7 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_alua.h"
#include "target_core_pscsi.h"
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
@@ -1019,6 +1022,470 @@ fail:
return -ENOMEM;
}
static inline u32 pscsi_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 8-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 8-bit sector value. SBC-3 says:
*
* A TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be written. Any other value
* specifies the number of logical blocks that shall be
* written.
*/
type_disk:
return cdb[4] ? : 256;
}
static inline u32 pscsi_get_sectors_10(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 16-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* XXX_10 is not defined in SSC, throw an exception
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
*ret = -EINVAL;
return 0;
}
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 16-bit sector value.
*/
type_disk:
return (u32)(cdb[7] << 8) + cdb[8];
}
static inline u32 pscsi_get_sectors_12(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* XXX_12 is not defined in SSC, throw an exception
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
*ret = -EINVAL;
return 0;
}
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 32-bit sector value.
*/
type_disk:
return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}
static inline u32 pscsi_get_sectors_16(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
type_disk:
return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
(cdb[12] << 8) + cdb[13];
}
/*
* Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
*/
static inline u32 pscsi_get_sectors_32(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/
return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
(cdb[30] << 8) + cdb[31];
}
static inline u32 pscsi_get_lba_21(unsigned char *cdb)
{
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}
static inline u32 pscsi_get_lba_32(unsigned char *cdb)
{
return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}
static inline unsigned long long pscsi_get_lba_64(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
/*
* For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
*/
static inline unsigned long long pscsi_get_lba_64_ext(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
static inline u32 pscsi_get_size(
u32 sectors,
unsigned char *cdb,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
if (cdb[1] & 1) { /* sectors */
return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
} else /* bytes */
return sectors;
}
pr_debug("Returning block_size: %u, sectors: %u == %u for"
" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
dev->transport->name);
return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
static int pscsi_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
unsigned char *cdb = cmd->t_task_cdb;
int sector_ret = 0;
u32 sectors = 0;
u16 service_action;
int ret;
if (cmd->se_cmd_flags & SCF_BIDI)
goto out_unsupported_cdb;
switch (cdb[0]) {
case READ_6:
sectors = pscsi_get_sectors_6(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_10:
sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_12:
sectors = pscsi_get_sectors_12(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_16:
sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_6:
sectors = pscsi_get_sectors_6(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_10:
case WRITE_VERIFY:
sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_12:
sectors = pscsi_get_sectors_12(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_16:
sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
cmd->t_task_lba = pscsi_get_lba_64(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
sectors = pscsi_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
goto out_invalid_cdb_field;
}
*size = pscsi_get_size(1, cdb, cmd);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
case GPCMD_READ_BUFFER_CAPACITY:
case GPCMD_SEND_OPC:
*size = (cdb[7] << 8) + cdb[8];
break;
case READ_BLOCK_LIMITS:
*size = READ_BLOCK_LEN;
break;
case GPCMD_GET_CONFIGURATION:
case GPCMD_READ_FORMAT_CAPACITIES:
case GPCMD_READ_DISC_INFO:
case GPCMD_READ_TRACK_RZONE_INFO:
*size = (cdb[7] << 8) + cdb[8];
break;
case GPCMD_MECHANISM_STATUS:
case GPCMD_READ_DVD_STRUCTURE:
*size = (cdb[8] << 8) + cdb[9];
break;
case READ_POSITION:
*size = READ_POSITION_LEN;
break;
case READ_BUFFER:
*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
break;
case READ_CAPACITY:
*size = READ_CAP_LEN;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SERVICE_ACTION_IN:
case ACCESS_CONTROL_IN:
case ACCESS_CONTROL_OUT:
*size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
break;
case READ_TOC:
*size = cdb[8];
break;
case READ_ELEMENT_STATUS:
*size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
cmd->t_task_lba = pscsi_get_lba_32(cdb);
} else {
sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
cmd->t_task_lba = pscsi_get_lba_64(cdb);
}
if (sector_ret)
goto out_unsupported_cdb;
*size = pscsi_get_size(sectors, cdb, cmd);
break;
case UNMAP:
*size = get_unaligned_be16(&cdb[7]);
break;
case WRITE_SAME_16:
sectors = pscsi_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
*size = pscsi_get_size(1, cdb, cmd);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
break;
case WRITE_SAME:
sectors = pscsi_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
*size = pscsi_get_size(1, cdb, cmd);
cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
break;
case ALLOW_MEDIUM_REMOVAL:
case ERASE:
case REZERO_UNIT:
case SEEK_10:
case SPACE:
case START_STOP:
case VERIFY:
case WRITE_FILEMARKS:
case GPCMD_CLOSE_TRACK:
case INITIALIZE_ELEMENT_STATUS:
case GPCMD_LOAD_UNLOAD:
case GPCMD_SET_SPEED:
case MOVE_MEDIUM:
*size = 0;
break;
case GET_EVENT_STATUS_NOTIFICATION:
*size = (cdb[7] << 8) | cdb[8];
break;
case ATA_16:
switch (cdb[2] & 0x3) { /* T_LENGTH */
case 0x0:
sectors = 0;
break;
case 0x1:
sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
break;
case 0x2:
sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
break;
case 0x3:
pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
goto out_invalid_cdb_field;
}
/* BYTE_BLOCK */
if (cdb[2] & 0x4) {
/* BLOCK T_TYPE: 512 or sector */
*size = sectors * ((cdb[2] & 0x10) ?
dev->se_sub_dev->se_dev_attrib.block_size : 512);
} else {
/* BYTE */
*size = sectors;
}
break;
default:
ret = spc_parse_cdb(cmd, size, true);
if (ret)
return ret;
}
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.fabric_max_sectors);
goto out_invalid_cdb_field;
}
if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.hw_max_sectors);
goto out_invalid_cdb_field;
}
}
return 0;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction)
{
@@ -1188,6 +1655,7 @@ static struct se_subsystem_api pscsi_template = {
.create_virtdevice = pscsi_create_virtdevice,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
.parse_cdb = pscsi_parse_cdb,
.execute_cmd = pscsi_execute_cmd,
.check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
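An aside on the open-coded LBA helpers above: pscsi_get_lba_64() assembles the READ_16/WRITE_16 LBA from CDB bytes 2..9 exactly as one big-endian 64-bit load would, which is why other cases in pscsi_parse_cdb() can use get_unaligned_be64() directly. A quick user-space check of that equivalence (be64_load() is a portable stand-in for the kernel's get_unaligned_be64()):

	#include <stdio.h>
	#include <stdint.h>

	/* portable stand-in for the kernel's get_unaligned_be64() */
	static uint64_t be64_load(const unsigned char *p)
	{
		uint64_t v = 0;
		for (int i = 0; i < 8; i++)
			v = (v << 8) | p[i];
		return v;
	}

	/* the helper from the hunk above, reproduced */
	static unsigned long long pscsi_get_lba_64(const unsigned char *cdb)
	{
		unsigned int v1, v2;
		v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
		v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		return ((unsigned long long)v2) | (unsigned long long)v1 << 32;
	}

	int main(void)
	{
		unsigned char cdb[16] = { [2] = 0x01, [3] = 0x02, [4] = 0x03, [5] = 0x04,
					  [6] = 0x05, [7] = 0x06, [8] = 0x07, [9] = 0x08 };

		/* both print 102030405060708 */
		printf("%llx\n", (unsigned long long)be64_load(&cdb[2]));
		printf("%llx\n", pscsi_get_lba_64(cdb));
		return 0;
	}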

drivers/target/target_core_rd.c

@@ -468,6 +468,7 @@ static struct se_subsystem_api rd_mcp_template = {
.allocate_virtdevice = rd_allocate_virtdevice,
.create_virtdevice = rd_create_virtdevice,
.free_device = rd_free_device,
.parse_cdb = sbc_parse_cdb,
.execute_cmd = rd_execute_cmd,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,

drivers/target/target_core_sbc.c

@@ -0,0 +1,450 @@
/*
* SCSI Block Commands (SBC) parsing and emulation.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_ua.h"
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long long end_lba;
u32 sectors;
sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
cmd->t_task_lba, sectors, end_lba);
return -EINVAL;
}
return 0;
}
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
/*
* Use 8-bit sector value. SBC-3 says:
*
* A TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be written. Any other value
* specifies the number of logical blocks that shall be
* written.
*/
return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
return (u32)(cdb[7] << 8) + cdb[8];
}
static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}
static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
(cdb[12] << 8) + cdb[13];
}
/*
* Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
*/
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
(cdb[30] << 8) + cdb[31];
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
/*
* For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
*/
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
static int sbc_write_same_supported(struct se_device *dev,
unsigned char *flags)
{
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
return -ENOSYS;
}
/*
* Currently for the emulated case we only accept
* tpws with the UNMAP=1 bit set.
*/
if (!(flags[0] & 0x08)) {
pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
return -ENOSYS;
}
return 0;
}
static void xdreadwrite_callback(struct se_cmd *cmd)
{
unsigned char *buf, *addr;
struct scatterlist *sg;
unsigned int offset;
int i;
int count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
* 1) read the specified logical block(s);
* 2) transfer logical blocks from the data-out buffer;
* 3) XOR the logical blocks transferred from the data-out buffer with
* the logical blocks read, storing the resulting XOR data in a buffer;
* 4) if the DISABLE WRITE bit is set to zero, then write the logical
* blocks transferred from the data-out buffer; and
* 5) transfer the resulting XOR data to the data-in buffer.
*/
buf = kmalloc(cmd->data_length, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate xor_callback buf\n");
return;
}
/*
* Copy the scatterlist WRITE buffer located at cmd->t_data_sg
* into the locally allocated *buf
*/
sg_copy_to_buffer(cmd->t_data_sg,
cmd->t_data_nents,
buf,
cmd->data_length);
/*
* Now perform the XOR against the BIDI read memory located at
* cmd->t_mem_bidi_list
*/
offset = 0;
for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
addr = kmap_atomic(sg_page(sg));
if (!addr)
goto out;
for (i = 0; i < sg->length; i++)
*(addr + sg->offset + i) ^= *(buf + offset + i);
offset += sg->length;
kunmap_atomic(addr);
}
out:
kfree(buf);
}
int sbc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
u32 sectors = 0;
int ret;
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_10:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_10:
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
!(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &xdreadwrite_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
{
u16 service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
case XDWRITEREAD_32:
sectors = transport_get_sectors_32(cdb);
/*
* Use WRITE_32 and READ_32 opcodes for the emulated
* XDWRITE_READ_32 logic.
*/
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &xdreadwrite_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
goto out_invalid_cdb_field;
}
*size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
if (sbc_write_same_supported(dev, &cdb[10]) < 0)
goto out_unsupported_cdb;
cmd->execute_cmd = target_emulate_write_same;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
}
case READ_CAPACITY:
*size = READ_CAP_LEN;
cmd->execute_cmd = target_emulate_readcapacity;
break;
case SERVICE_ACTION_IN:
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
cmd->execute_cmd = target_emulate_readcapacity_16;
break;
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
goto out_invalid_cdb_field;
}
*size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
}
*size = sbc_get_size(cmd, sectors);
/*
* Check to ensure that LBA + Range does not exceed past end of
* device for IBLOCK and FILEIO ->do_sync_cache() backend calls
*/
if (cmd->t_task_lba || sectors) {
if (sbc_check_valid_sectors(cmd) < 0)
goto out_invalid_cdb_field;
}
cmd->execute_cmd = target_emulate_synchronize_cache;
break;
case UNMAP:
*size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = target_emulate_unmap;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
*size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
if (sbc_write_same_supported(dev, &cdb[1]) < 0)
goto out_unsupported_cdb;
cmd->execute_cmd = target_emulate_write_same;
break;
case WRITE_SAME:
sectors = transport_get_sectors_10(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
*size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
/*
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
if (sbc_write_same_supported(dev, &cdb[1]) < 0)
goto out_unsupported_cdb;
cmd->execute_cmd = target_emulate_write_same;
break;
case VERIFY:
*size = 0;
cmd->execute_cmd = target_emulate_noop;
break;
default:
ret = spc_parse_cdb(cmd, size, false);
if (ret)
return ret;
}
/* reject any command that we don't have a handler for */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
goto out_unsupported_cdb;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.fabric_max_sectors);
goto out_invalid_cdb_field;
}
if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.hw_max_sectors);
goto out_invalid_cdb_field;
}
*size = sbc_get_size(cmd, sectors);
}
return 0;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
EXPORT_SYMBOL(sbc_parse_cdb);
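Because sbc_parse_cdb() is exported, a backend does not have to adopt it wholesale. A hypothetical backend (foo_parse_cdb and the 0xc0 opcode are invented here purely for illustration) could claim a few opcodes itself and defer everything else to the shared parser:

	static int foo_parse_cdb(struct se_cmd *cmd, unsigned int *size)
	{
		/* hypothetical vendor-specific opcode handled by the backend itself */
		if (cmd->t_task_cdb[0] == 0xc0) {
			*size = 0;
			return 0;
		}
		/* everything else gets the shared SBC emulation */
		return sbc_parse_cdb(cmd, size);
	}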

drivers/target/target_core_spc.c

@@ -152,6 +152,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough)
cmd->sam_task_attr = MSG_HEAD_TAG;
break;
case TEST_UNIT_READY:
*size = 0;
if (!passthrough)
cmd->execute_cmd = target_emulate_noop;
break;
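The added *size = 0 line matters because size is now an out-parameter: as the target_core_transport.c hunk below shows, the caller passes an uninitialized local down and feeds whatever the parser wrote straight into the size check, so every opcode path has to set it explicitly. Condensed from that hunk:

	unsigned int size;	/* note: not initialized by the caller */

	ret = cmd->se_dev->transport->parse_cdb(cmd, &size);
	if (ret < 0)
		return ret;
	ret = target_cmd_size_check(cmd, size);	/* consumes the parsed value */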

drivers/target/target_core_transport.c

@@ -1343,8 +1343,6 @@ static inline void transport_generic_prepare_cdb(
}
}
static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
static int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
struct se_device *dev = cmd->se_dev;
@@ -1471,6 +1469,7 @@ int target_setup_cmd_from_cdb(
u32 pr_reg_type = 0;
u8 alua_ascq = 0;
unsigned long flags;
unsigned int size;
int ret;
transport_generic_prepare_cdb(cdb);
@@ -1562,13 +1561,11 @@ int target_setup_cmd_from_cdb(
*/
}
/*
* Setup the received CDB based on SCSI defined opcodes and
* perform unit attention, persistent reservations and ALUA
* checks for virtual device backends. The cmd->t_task_cdb
* pointer is expected to be setup before we reach this point.
*/
ret = transport_generic_cmd_sequencer(cmd, cdb);
ret = cmd->se_dev->transport->parse_cdb(cmd, &size);
if (ret < 0)
return ret;
ret = target_cmd_size_check(cmd, size);
if (ret < 0)
return ret;
@@ -1694,10 +1691,7 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
target_put_sess_cmd(se_sess, se_cmd);
return;
}
/*
* Sanitize CDBs via transport_generic_cmd_sequencer() and
* allocate the necessary tasks to complete the received CDB+data
*/
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
transport_generic_request_failure(se_cmd);
@@ -1966,39 +1960,6 @@ queue_full:
}
EXPORT_SYMBOL(transport_generic_request_failure);
static inline u32 transport_lba_21(unsigned char *cdb)
{
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
/*
* For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
*/
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
/*
* Called from Fabric Module context from transport_execute_tasks()
*
@@ -2147,217 +2108,6 @@ check_depth:
return 0;
}
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 8-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 8-bit sector value. SBC-3 says:
*
* A TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be written. Any other value
* specifies the number of logical blocks that shall be
* written.
*/
type_disk:
return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 16-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* XXX_10 is not defined in SSC, throw an exception
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
*ret = -EINVAL;
return 0;
}
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 16-bit sector value.
*/
type_disk:
return (u32)(cdb[7] << 8) + cdb[8];
}
static inline u32 transport_get_sectors_12(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* XXX_12 is not defined in SSC, throw an exception
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
*ret = -EINVAL;
return 0;
}
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 32-bit sector value.
*/
type_disk:
return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}
static inline u32 transport_get_sectors_16(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
type_disk:
return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
(cdb[12] << 8) + cdb[13];
}
/*
* Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
*/
static inline u32 transport_get_sectors_32(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/
return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
(cdb[30] << 8) + cdb[31];
}
static inline u32 transport_get_size(
u32 sectors,
unsigned char *cdb,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
if (cdb[1] & 1) { /* sectors */
return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
} else /* bytes */
return sectors;
}
pr_debug("Returning block_size: %u, sectors: %u == %u for"
" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
dev->transport->name);
return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
static void transport_xor_callback(struct se_cmd *cmd)
{
unsigned char *buf, *addr;
struct scatterlist *sg;
unsigned int offset;
int i;
int count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
* 1) read the specified logical block(s);
* 2) transfer logical blocks from the data-out buffer;
* 3) XOR the logical blocks transferred from the data-out buffer with
* the logical blocks read, storing the resulting XOR data in a buffer;
* 4) if the DISABLE WRITE bit is set to zero, then write the logical
* blocks transferred from the data-out buffer; and
* 5) transfer the resulting XOR data to the data-in buffer.
*/
buf = kmalloc(cmd->data_length, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate xor_callback buf\n");
return;
}
/*
* Copy the scatterlist WRITE buffer located at cmd->t_data_sg
* into the locally allocated *buf
*/
sg_copy_to_buffer(cmd->t_data_sg,
cmd->t_data_nents,
buf,
cmd->data_length);
/*
* Now perform the XOR against the BIDI read memory located at
* cmd->t_mem_bidi_list
*/
offset = 0;
for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
addr = kmap_atomic(sg_page(sg));
if (!addr)
goto out;
for (i = 0; i < sg->length; i++)
*(addr + sg->offset + i) ^= *(buf + offset + i);
offset += sg->length;
kunmap_atomic(addr);
}
out:
kfree(buf);
}
/*
* Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
*/
@@ -2439,478 +2189,6 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
return 0;
}
static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
/*
 * Determine if the received WRITE_SAME is used for direct
 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
 */
int passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
if (!passthrough) {
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
return -ENOSYS;
}
/*
* Currently for the emulated case we only accept
* tpws with the UNMAP=1 bit set.
*/
if (!(flags[0] & 0x08)) {
pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
return -ENOSYS;
}
}
return 0;
}
static int transport_generic_cmd_sequencer(
struct se_cmd *cmd,
unsigned char *cdb)
{
struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
int sector_ret = 0, passthrough;
u32 sectors = 0, size = 0;
u16 service_action;
int ret;
/*
* If we operate in passthrough mode we skip most CDB emulation and
* instead hand the commands down to the physical SCSI device.
*/
passthrough =
(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_10:
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_12:
sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case READ_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_10:
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_12:
sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case WRITE_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
!(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/*
* Do not allow BIDI commands for passthrough mode.
*/
if (passthrough)
goto out_unsupported_cdb;
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
case XDWRITEREAD_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
/*
* Use WRITE_32 and READ_32 opcodes for the emulated
* XDWRITE_READ_32 logic.
*/
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/*
* Do not allow BIDI commands for passthrough mode.
*/
if (passthrough)
goto out_unsupported_cdb;
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
if (sectors)
size = transport_get_size(1, cdb, cmd);
else {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
goto out_invalid_cdb_field;
}
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
if (target_check_write_same_discard(&cdb[10], dev) < 0)
goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_cmd = target_emulate_write_same;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
case MAINTENANCE_IN:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_IN from SCC-2 */
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
(cdb[8] << 8) | cdb[9];
} else {
/* GPCMD_SEND_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
break;
case GPCMD_READ_BUFFER_CAPACITY:
case GPCMD_SEND_OPC:
size = (cdb[7] << 8) + cdb[8];
break;
case READ_BLOCK_LIMITS:
size = READ_BLOCK_LEN;
break;
case GPCMD_GET_CONFIGURATION:
case GPCMD_READ_FORMAT_CAPACITIES:
case GPCMD_READ_DISC_INFO:
case GPCMD_READ_TRACK_RZONE_INFO:
size = (cdb[7] << 8) + cdb[8];
break;
case GPCMD_MECHANISM_STATUS:
case GPCMD_READ_DVD_STRUCTURE:
size = (cdb[8] << 8) + cdb[9];
break;
case READ_POSITION:
size = READ_POSITION_LEN;
break;
case MAINTENANCE_OUT:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_OUT from SCC-2
*
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_set_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
(cdb[8] << 8) | cdb[9];
} else {
/* GPCMD_REPORT_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
break;
case READ_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
break;
case READ_CAPACITY:
size = READ_CAP_LEN;
if (!passthrough)
cmd->execute_cmd = target_emulate_readcapacity;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SERVICE_ACTION_IN:
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
if (!passthrough)
cmd->execute_cmd =
target_emulate_readcapacity_16;
break;
default:
if (passthrough)
break;
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
goto out_invalid_cdb_field;
}
/*FALLTHROUGH*/
case ACCESS_CONTROL_IN:
case ACCESS_CONTROL_OUT:
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
case GPCMD_READ_CD:
sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
size = (2336 * sectors);
break;
#endif
case READ_TOC:
size = cdb[8];
break;
case READ_ELEMENT_STATUS:
size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
cmd->t_task_lba = transport_lba_64(cdb);
}
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
if (passthrough)
break;
/*
* Check to ensure that LBA + Range does not exceed past end of
* device for IBLOCK and FILEIO ->do_sync_cache() backend calls
*/
if ((cmd->t_task_lba != 0) || (sectors != 0)) {
if (transport_cmd_get_valid_sectors(cmd) < 0)
goto out_invalid_cdb_field;
}
cmd->execute_cmd = target_emulate_synchronize_cache;
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
if (!passthrough)
cmd->execute_cmd = target_emulate_unmap;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
if (sectors)
size = transport_get_size(1, cdb, cmd);
else {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
if (target_check_write_same_discard(&cdb[1], dev) < 0)
goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_cmd = target_emulate_write_same;
break;
case WRITE_SAME:
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
if (sectors)
size = transport_get_size(1, cdb, cmd);
else {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
/*
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
if (target_check_write_same_discard(&cdb[1], dev) < 0)
goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_cmd = target_emulate_write_same;
break;
case ALLOW_MEDIUM_REMOVAL:
case ERASE:
case REZERO_UNIT:
case SEEK_10:
case SPACE:
case START_STOP:
case VERIFY:
case WRITE_FILEMARKS:
if (!passthrough)
cmd->execute_cmd = target_emulate_noop;
break;
case GPCMD_CLOSE_TRACK:
case INITIALIZE_ELEMENT_STATUS:
case GPCMD_LOAD_UNLOAD:
case GPCMD_SET_SPEED:
case MOVE_MEDIUM:
break;
case GET_EVENT_STATUS_NOTIFICATION:
size = (cdb[7] << 8) | cdb[8];
break;
case ATA_16:
/* Only support ATA passthrough to pSCSI backends.. */
if (!passthrough)
goto out_unsupported_cdb;
/* T_LENGTH */
switch (cdb[2] & 0x3) {
case 0x0:
sectors = 0;
break;
case 0x1:
sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
break;
case 0x2:
sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
break;
case 0x3:
pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
goto out_invalid_cdb_field;
}
/* BYTE_BLOCK */
if (cdb[2] & 0x4) {
/* BLOCK T_TYPE: 512 or sector */
size = sectors * ((cdb[2] & 0x10) ?
dev->se_sub_dev->se_dev_attrib.block_size : 512);
} else {
/* BYTE */
size = sectors;
}
break;
default:
ret = spc_parse_cdb(cmd, &size, passthrough);
if (ret)
return ret;
}
ret = target_cmd_size_check(cmd, size);
if (ret)
return ret;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.fabric_max_sectors);
goto out_invalid_cdb_field;
}
if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.hw_max_sectors);
goto out_invalid_cdb_field;
}
}
/* reject any command that we don't have a handler for */
if (!(passthrough || cmd->execute_cmd ||
(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB)))
goto out_unsupported_cdb;
return 0;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
* Called from I/O completion to determine which dormant/delayed
* and ordered cmds need to have their tasks added to the execution queue.

include/target/target_core_backend.h

@@ -24,6 +24,8 @@ struct se_subsystem_api {
struct se_subsystem_dev *, void *);
void (*free_device)(void *);
int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
int (*parse_cdb)(struct se_cmd *cmd, unsigned int *size);
int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
enum dma_data_direction);
int (*do_discard)(struct se_device *, sector_t, u32);
@@ -49,6 +51,9 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *,
void target_complete_cmd(struct se_cmd *, u8);
int sbc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size, bool passthrough);
void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
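For reference, the parse dispatch introduced by this patch forms a short chain; condensed from the hunks above (flow only):

	target_setup_cmd_from_cdb()
	    -> dev->transport->parse_cdb(cmd, &size)
	           fileio/iblock/rd_mcp: sbc_parse_cdb()   -> spc_parse_cdb(cmd, size, false)
	           pSCSI:                pscsi_parse_cdb() -> spc_parse_cdb(cmd, size, true)
	    -> target_cmd_size_check(cmd, size)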