ide: call ide_build_sglist() prior to ->dma_setup (v2)

* Re-map sg table if needed in ide_build_sglist().

* Move ide_build_sglist() call from ->dma_setup to its users.

* Un-export ide_build_sglist().

v2:
* Build fix for CONFIG_BLK_DEV_IDEDMA=n (noticed by Randy Dunlap).

There should be no functional changes caused by this patch.

Cc: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
commit e6830a86c2
parent b109f526ca
Author: Bartlomiej Zolnierkiewicz
Date:   2009-03-27 12:46:37 +01:00

10 changed files with 24 additions and 41 deletions
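
The gist of the change, for every caller of ->dma_setup(): map the scatter/gather table first and only attempt DMA setup if the mapping produced at least one entry. A minimal sketch of the new calling pattern, using the same names as the ide_issue_pc() hunk below (the other call sites follow the same shape):

	/* old: the sg table was built and mapped inside the ->dma_setup() path */
	if (drive->dma)
		drive->dma = !hwif->dma_ops->dma_setup(drive);

	/* new: the caller maps the sg table first; skip DMA for this command if mapping fails */
	if (drive->dma) {
		if (ide_build_sglist(drive, rq))
			drive->dma = !hwif->dma_ops->dma_setup(drive);
		else
			drive->dma = 0;
	}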


@@ -211,21 +211,16 @@ static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 static int auide_build_dmatable(ide_drive_t *drive)
 {
-	int i, iswrite, count = 0;
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = hwif->rq;
 	_auide_hwif *ahwif = &auide_hwif;
 	struct scatterlist *sg;
+	int i = hwif->sg_nents, iswrite, count = 0;
 	iswrite = (rq_data_dir(rq) == WRITE);
 	/* Save for interrupt context */
 	ahwif->drive = drive;
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-	if (!i)
-		return 0;
 	/* fill the descriptors */
 	sg = hwif->sg_table;
 	while (i && sg_dma_len(sg)) {


@@ -325,12 +325,6 @@ static int icside_dma_setup(ide_drive_t *drive)
 	 */
 	BUG_ON(dma_channel_active(ec->dma));
-	hwif->sg_nents = ide_build_sglist(drive, rq);
-	if (hwif->sg_nents == 0) {
-		ide_map_sg(drive, rq);
-		return 1;
-	}
 	/*
 	 * Ensure that we have the right interrupt routed.
 	 */


@@ -631,18 +631,23 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive)
 	struct ide_atapi_pc *pc;
 	ide_hwif_t *hwif = drive->hwif;
 	ide_expiry_t *expiry = NULL;
+	struct request *rq = hwif->rq;
 	unsigned int timeout;
 	u32 tf_flags;
 	u16 bcount;
 	if (dev_is_idecd(drive)) {
 		tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL;
-		bcount = ide_cd_get_xferlen(hwif->rq);
+		bcount = ide_cd_get_xferlen(rq);
 		expiry = ide_cd_expiry;
 		timeout = ATAPI_WAIT_PC;
-		if (drive->dma)
-			drive->dma = !hwif->dma_ops->dma_setup(drive);
+		if (drive->dma) {
+			if (ide_build_sglist(drive, rq))
+				drive->dma = !hwif->dma_ops->dma_setup(drive);
+			else
+				drive->dma = 0;
+		}
 	} else {
 		pc = drive->pc;
@@ -661,8 +666,12 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive)
 	}
 	if ((pc->flags & PC_FLAG_DMA_OK) &&
-	    (drive->dev_flags & IDE_DFLAG_USING_DMA))
-		drive->dma = !hwif->dma_ops->dma_setup(drive);
+	    (drive->dev_flags & IDE_DFLAG_USING_DMA)) {
+		if (ide_build_sglist(drive, rq))
+			drive->dma = !hwif->dma_ops->dma_setup(drive);
+		else
+			drive->dma = 0;
+	}
 	if (!drive->dma)
 		pc->flags &= ~PC_FLAG_DMA_OK;


@@ -120,10 +120,6 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	struct scatterlist *sg;
 	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
-	hwif->sg_nents = ide_build_sglist(drive, rq);
-	if (hwif->sg_nents == 0)
-		return 0;
 	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
 		u32 cur_addr, cur_len, xcount, bcount;


@@ -138,14 +138,15 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 		hwif->sg_dma_direction = DMA_TO_DEVICE;
 	i = dma_map_sg(hwif->dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
-	if (i) {
+	if (i == 0)
+		ide_map_sg(drive, rq);
+	else {
 		hwif->orig_sg_nents = hwif->sg_nents;
 		hwif->sg_nents = i;
 	}
 	return i;
 }
-EXPORT_SYMBOL_GPL(ide_build_sglist);
 /**
  * ide_destroy_dmatable - clean up DMA mapping


@@ -103,6 +103,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
 		return ide_started;
 	default:
 		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
+		    ide_build_sglist(drive, hwif->rq) == 0 ||
 		    dma_ops->dma_setup(drive))
 			return ide_stopped;
 		dma_ops->dma_exec_cmd(drive, tf->command);


@@ -1429,10 +1429,10 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	pmac_ide_hwif_t *pmif =
 		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
 	struct dbdma_cmd *table;
-	int i, count = 0;
 	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
 	struct scatterlist *sg;
 	int wr = (rq_data_dir(rq) == WRITE);
+	int i = hwif->sg_nents, count = 0;
 	/* DMA table is already aligned */
 	table = (struct dbdma_cmd *) pmif->dma_table_cpu;
@@ -1442,11 +1442,6 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	while (readl(&dma->status) & RUN)
 		udelay(1);
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-	if (!i)
-		return 0;
 	/* Build DBDMA commands list */
 	sg = hwif->sg_table;
 	while (i && sg_dma_len(sg)) {


@@ -429,15 +429,9 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	unsigned int *table = hwif->dmatable_cpu;
-	unsigned int count = 0, i = 1;
-	struct scatterlist *sg;
+	unsigned int count = 0, i = hwif->sg_nents;
+	struct scatterlist *sg = hwif->sg_table;
-	hwif->sg_nents = i = ide_build_sglist(drive, rq);
-	if (!i)
-		return 0;	/* sglist of length Zero */
-	sg = hwif->sg_table;
 	while (i && sg_dma_len(sg)) {
 		dma_addr_t cur_addr;
 		int cur_len;


@@ -240,10 +240,6 @@ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 	int i;
 	struct scatterlist *sg;
-	hwif->sg_nents = ide_build_sglist(drive, rq);
-	if (hwif->sg_nents == 0)
-		return 0;
 	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
 		u32 cur_addr, cur_len, bcount;


@@ -1477,6 +1477,8 @@ static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
 static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
+static inline int ide_build_sglist(ide_drive_t *drive,
+				   struct request *rq) { return 0; }
 #endif /* CONFIG_BLK_DEV_IDEDMA */
 #ifdef CONFIG_BLK_DEV_IDEACPI