Repository: dect / linux-2.6 (Archived) — 13 stars · 0 watchers · Fork 0

[SCSI] qla4xxx: convert to use the data buffer accessors

- remove the unnecessary map_single path.

- convert to use the new accessors for the sg lists and the
parameters.

Jens Axboe <jens.axboe@oracle.com> did the for_each_sg cleanup.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: David C Somayajulu <david.somayajulu@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
This commit is contained in:
FUJITA Tomonori 2007-05-26 14:08:20 +09:00 committed by James Bottomley
parent 1928d73fac
commit 5f7186c841
3 changed files with 37 additions and 73 deletions

View File

@@ -145,11 +145,13 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
uint16_t avail_dsds;
struct data_seg_a64 *cur_dsd;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
cmd = srb->cmd;
ha = srb->ha;
if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
/* No data being transferred */
cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
return;
@@ -158,40 +160,27 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
avail_dsds = COMMAND_SEG;
cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
/* Load data segments */
if (cmd->use_sg) {
struct scatterlist *cur_seg;
struct scatterlist *end_seg;
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
dma_addr_t sle_dma;
cur_seg = (struct scatterlist *)cmd->request_buffer;
end_seg = cur_seg + tot_dsds;
while (cur_seg < end_seg) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
struct continuation_t1_entry *cont_entry;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
struct continuation_t1_entry *cont_entry;
cont_entry = qla4xxx_alloc_cont_entry(ha);
cur_dsd =
(struct data_seg_a64 *)
&cont_entry->dataseg[0];
avail_dsds = CONTINUE_SEG;
}
sle_dma = sg_dma_address(cur_seg);
cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
avail_dsds--;
cur_dsd++;
cur_seg++;
cont_entry = qla4xxx_alloc_cont_entry(ha);
cur_dsd =
(struct data_seg_a64 *)
&cont_entry->dataseg[0];
avail_dsds = CONTINUE_SEG;
}
} else {
cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
sle_dma = sg_dma_address(sg);
cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
avail_dsds--;
cur_dsd++;
}
}
@@ -208,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
struct scsi_cmnd *cmd = srb->cmd;
struct ddb_entry *ddb_entry;
struct command_t3_entry *cmd_entry;
struct scatterlist *sg = NULL;
int nseg;
uint16_t tot_dsds;
uint16_t req_cnt;
@@ -237,24 +226,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
index = (uint32_t)cmd->request->tag;
/* Calculate the number of request entries needed. */
if (cmd->use_sg) {
sg = (struct scatterlist *)cmd->request_buffer;
tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
cmd->sc_data_direction);
if (tot_dsds == 0)
goto queuing_error;
} else if (cmd->request_bufflen) {
dma_addr_t req_dma;
nseg = scsi_dma_map(cmd);
if (nseg < 0)
goto queuing_error;
tot_dsds = nseg;
req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
cmd->request_bufflen,
cmd->sc_data_direction);
if (dma_mapping_error(req_dma))
goto queuing_error;
srb->dma_handle = req_dma;
tot_dsds = 1;
}
req_cnt = qla4xxx_calc_request_entries(tot_dsds);
if (ha->req_q_count < (req_cnt + 2)) {
@@ -283,7 +259,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
cmd_entry->hdr.entryCount = req_cnt;
@@ -293,13 +269,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
* transferred, as the data direction bit is sometimes filled
* in when there is no data to be transferred */
cmd_entry->control_flags = CF_NO_DATA;
if (cmd->request_bufflen) {
if (scsi_bufflen(cmd)) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
cmd_entry->control_flags = CF_WRITE;
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
cmd_entry->control_flags = CF_READ;
ha->bytes_xfered += cmd->request_bufflen;
ha->bytes_xfered += scsi_bufflen(cmd);
if (ha->bytes_xfered & ~0xFFFFF){
ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
ha->bytes_xfered &= 0xFFFFF;
@@ -363,14 +339,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
return QLA_SUCCESS;
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
if (cmd->use_sg && tot_dsds) {
sg = (struct scatterlist *) cmd->request_buffer;
pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
cmd->sc_data_direction);
} else if (tot_dsds)
pci_unmap_single(ha->pdev, srb->dma_handle,
cmd->request_bufflen, cmd->sc_data_direction);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_ERROR;

View File

@@ -95,7 +95,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if (sts_entry->iscsiFlags &
(ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
cmd->resid = residual;
scsi_set_resid(cmd, residual);
cmd->result = DID_OK << 16 | scsi_status;
@@ -179,14 +179,14 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
* Firmware detected a SCSI transport underrun
* condition
*/
cmd->resid = residual;
scsi_set_resid(cmd, residual);
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
"detected, xferlen = 0x%x, residual = "
"0x%x\n",
ha->host_no, cmd->device->channel,
cmd->device->id,
cmd->device->lun, __func__,
cmd->request_bufflen,
scsi_bufflen(cmd),
residual));
}
@@ -230,7 +230,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if ((sts_entry->iscsiFlags &
ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
cmd->result = DID_BUS_BUSY << 16;
} else if ((cmd->request_bufflen - residual) <
} else if ((scsi_bufflen(cmd) - residual) <
cmd->underflow) {
/*
* Handle mid-layer underflow???
@@ -251,7 +251,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
cmd->device->channel,
cmd->device->id,
cmd->device->lun, __func__,
cmd->request_bufflen, residual));
scsi_bufflen(cmd), residual));
cmd->result = DID_ERROR << 16;
} else {

View File

@@ -373,14 +373,7 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
struct scsi_cmnd *cmd = srb->cmd;
if (srb->flags & SRB_DMA_VALID) {
if (cmd->use_sg) {
pci_unmap_sg(ha->pdev, cmd->request_buffer,
cmd->use_sg, cmd->sc_data_direction);
} else if (cmd->request_bufflen) {
pci_unmap_single(ha->pdev, srb->dma_handle,
cmd->request_bufflen,
cmd->sc_data_direction);
}
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
cmd->SCp.ptr = NULL;