libata-sff: clean up BMDMA initialization

When BMDMA initialization failed or BMDMA was not available for
whatever reason, bmdma_addr was left at zero and used as an indication
that BMDMA shouldn't be used.  This leads to the following problems.

p1. For BMDMA drivers which don't use the traditional BMDMA registers,
    ata_bmdma_mode_filter() incorrectly inhibits DMA modes.  Those
    drivers either have to inherit from ata_sff_port_ops or clear
    ->mode_filter explicitly.

p2. Non-BMDMA drivers call into BMDMA PRD table allocation.  It
    doesn't actually allocate a PRD table if bmdma_addr is not
    initialized, but it is still confusing.

p3. For BMDMA drivers which don't use the traditional BMDMA registers,
    some methods might not be invoked as expected (e.g. bmdma_stop
    from ata_sff_post_internal_cmd()).

p4. SFF drivers with a custom DMA interface implement no-op BMDMA ops
    out of concern that the libata core might call into one of them.

These problems are caused by the muddy line between SFF and BMDMA and
the assumption that all BMDMA controllers initialize bmdma_addr.

This patch fixes p1 and p2 by removing the bmdma_addr assumption and
moving PRD allocation to the BMDMA port_start.  Later patches will fix
the remaining issues.
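
To make this concrete, here is the PRD allocation gating before and
after the patch, condensed from the removed ata_sff_port_start() and
ata_port_start() and the new ata_bmdma_port_start() in the diff below
(kerneldoc and locking comments trimmed):

    /* before: allocation keyed off the BMDMA register address */
    int ata_sff_port_start(struct ata_port *ap)
    {
            if (ap->ioaddr.bmdma_addr)
                    return ata_port_start(ap); /* dmam_alloc_coherent() of the PRD table */
            return 0;
    }

    /* after: allocation keyed off the configured DMA masks */
    int ata_bmdma_port_start(struct ata_port *ap)
    {
            if (ap->mwdma_mask || ap->udma_mask) {
                    ap->prd = dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
                                                  &ap->prd_dma, GFP_KERNEL);
                    if (!ap->prd)
                            return -ENOMEM;
            }
            return 0;
    }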

This patch improves BMDMA initialization such that

* When BMDMA register initialization fails, the host falls back to PIO
  instead of failing the whole initialization (see the caller sketch
  after the notes below).  ata_pci_bmdma_init() never fails now.

* When ata_pci_bmdma_init() falls back to PIO, it clears
  ap->mwdma_mask and ap->udma_mask instead of depending on
  ata_bmdma_mode_filter().  This makes ata_bmdma_mode_filter()
  unnecessary, thus resolving p1.

* ata_port_start(), which actually is BMDMA specific, is moved to
  ata_bmdma_port_start() (see the sketch after this list).
  ata_port_start() and ata_sff_port_start() are killed.

* ata_sff_port_start32() is moved and renamed to
  ata_bmdma_port_start32().
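
This is the sketch referred to above: for a BMDMA driver that keeps a
custom port_start for its own per-port state, the conversion is
mechanical: chain into the new helper first.  The driver name and
private structure here are hypothetical; the pattern follows the
via_port_start()/it821x_port_start() hunks later in the diff.

    static int foo_port_start(struct ata_port *ap)
    {
            struct foo_port_priv *pp;
            int ret = ata_bmdma_port_start(ap); /* PRD table iff DMA enabled */

            if (ret < 0)
                    return ret;

            pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
            if (!pp)
                    return -ENOMEM;
            ap->private_data = pp;
            return 0;
    }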

Drivers which no longer call into PRD table allocation are...

  pdc_adma, sata_inic162x, sata_qstor, sata_sx4, pata_cmd640 and all
  drivers which inherit from ata_sff_port_ops.

pata_icside sets ->port_start to ATA_OP_NULL as it is a BMDMA
controller which doesn't need a PRD table and, unlike other such
controllers, has no custom port_start.

Note that with the previous patch, which makes all and only BMDMA
drivers inherit from ata_bmdma_port_ops, this change doesn't break
drivers which need a PRD table.
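
With ata_pci_bmdma_init() now returning void and handling its own PIO
fallback, a host init path simply calls it unconditionally.  A sketch
of the calling convention, as in the ata_pci_sff_prepare_host() and
uli_init_one() hunks below:

    /* after the SFF host has been prepared; can no longer fail the probe */
    ata_pci_bmdma_init(host);       /* on error it logs the reason, zeroes the
                                     * ports' mwdma/udma masks and leaves the
                                     * host running in PIO mode */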

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Tejun Heo 2010-05-10 21:41:34 +02:00 committed by Jeff Garzik
parent 8930ff254a
commit c7087652e1
27 changed files with 116 additions and 171 deletions


@ -5505,30 +5505,6 @@ void ata_host_resume(struct ata_host *host)
}
#endif
/**
* ata_port_start - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Allocates space for PRD table.
*
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
int ata_port_start(struct ata_port *ap)
{
struct device *dev = ap->dev;
ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
GFP_KERNEL);
if (!ap->prd)
return -ENOMEM;
return 0;
}
/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
@ -6757,7 +6733,6 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);


@ -66,8 +66,6 @@ const struct ata_port_operations ata_sff_port_ops = {
.sff_irq_clear = ata_sff_irq_clear,
.lost_interrupt = ata_sff_lost_interrupt,
.port_start = ata_sff_port_start,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
@ -2443,50 +2441,6 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
/**
* ata_sff_port_start - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Allocates space for PRD table if the device
* is DMA capable SFF.
*
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
int ata_sff_port_start(struct ata_port *ap)
{
if (ap->ioaddr.bmdma_addr)
return ata_port_start(ap);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_port_start);
/**
* ata_sff_port_start32 - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Allocates space for PRD table if the device
* is DMA capable SFF.
*
* May be used as the port_start() entry in ata_port_operations for
* devices that are capable of 32bit PIO.
*
* LOCKING:
* Inherited from caller.
*/
int ata_sff_port_start32(struct ata_port *ap)
{
ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
if (ap->ioaddr.bmdma_addr)
return ata_port_start(ap);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_port_start32);
/**
* ata_sff_std_ports - initialize ioaddr with standard port offsets.
* @ioaddr: IO address structure to be initialized
@ -2646,21 +2600,12 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
goto err_out;
/* init DMA related stuff */
rc = ata_pci_bmdma_init(host);
if (rc)
goto err_bmdma;
ata_pci_bmdma_init(host);
devres_remove_group(&pdev->dev, NULL);
*r_host = host;
return 0;
err_bmdma:
/* This is necessary because PCI and iomap resources are
* merged and releasing the top group won't release the
* acquired resources if some of those have been acquired
* before entering this function.
*/
pcim_iounmap_regions(pdev, 0xf);
err_out:
devres_release_group(&pdev->dev, NULL);
return rc;
@ -2843,12 +2788,12 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
const struct ata_port_operations ata_bmdma_port_ops = {
.inherits = &ata_sff_port_ops,
.mode_filter = ata_bmdma_mode_filter,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.port_start = ata_bmdma_port_start,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
@ -2856,22 +2801,10 @@ const struct ata_port_operations ata_bmdma32_port_ops = {
.inherits = &ata_bmdma_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
.port_start = ata_sff_port_start32,
.port_start = ata_bmdma_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
unsigned long xfer_mask)
{
/* Filter out DMA modes if the device has been configured by
the BIOS as PIO only */
if (adev->link->ap->ioaddr.bmdma_addr == NULL)
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
return xfer_mask;
}
EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
/**
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
@ -2976,6 +2909,53 @@ u8 ata_bmdma_status(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);
/**
* ata_bmdma_port_start - Set port up for bmdma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Allocates space for PRD table.
*
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
int ata_bmdma_port_start(struct ata_port *ap)
{
if (ap->mwdma_mask || ap->udma_mask) {
ap->prd = dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
&ap->prd_dma, GFP_KERNEL);
if (!ap->prd)
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
/**
* ata_bmdma_port_start32 - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Enables 32bit PIO and allocates space for PRD
* table.
*
* May be used as the port_start() entry in ata_port_operations for
* devices that are capable of 32bit PIO.
*
* LOCKING:
* Inherited from caller.
*/
int ata_bmdma_port_start32(struct ata_port *ap)
{
ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
return ata_bmdma_port_start(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
#ifdef CONFIG_PCI
/**
@ -3004,6 +2984,19 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
int i;
dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
reason);
for (i = 0; i < 2; i++) {
host->ports[i]->mwdma_mask = 0;
host->ports[i]->udma_mask = 0;
}
}
/**
* ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
* @host: target ATA host
@ -3012,33 +3005,40 @@ EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
*
* LOCKING:
* Inherited from calling layer (may sleep).
*
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_pci_bmdma_init(struct ata_host *host)
void ata_pci_bmdma_init(struct ata_host *host)
{
struct device *gdev = host->dev;
struct pci_dev *pdev = to_pci_dev(gdev);
int i, rc;
/* No BAR4 allocation: No DMA */
if (pci_resource_start(pdev, 4) == 0)
return 0;
if (pci_resource_start(pdev, 4) == 0) {
ata_bmdma_nodma(host, "BAR4 is zero");
return;
}
/* TODO: If we get no DMA mask we should fall back to PIO */
/*
* Some controllers require BMDMA region to be initialized
* even if DMA is not in use to clear IRQ status via
* ->sff_irq_clear method. Try to initialize bmdma_addr
* regardless of dma masks.
*/
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
return rc;
ata_bmdma_nodma(host, "failed to set dma mask");
if (!rc) {
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
ata_bmdma_nodma(host,
"failed to set consistent dma mask");
}
/* request and iomap DMA region */
rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
if (rc) {
dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
return -ENOMEM;
ata_bmdma_nodma(host, "failed to request/iomap BAR4");
return;
}
host->iomap = pcim_iomap_table(pdev);
@ -3057,8 +3057,6 @@ int ata_pci_bmdma_init(struct ata_host *host)
ata_port_desc(ap, "bmdma 0x%llx",
(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);


@ -101,7 +101,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device
static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
{
struct pata_acpi *acpi = adev->link->ap->private_data;
return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]);
return mask & acpi->mask[adev->devno];
}
/**
@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap)
return -ENOMEM;
acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
ret = ata_sff_port_start(ap);
ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;


@ -124,7 +124,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
return mask &= ~ATA_MASK_UDMA;
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
/**


@ -202,7 +202,6 @@ static struct ata_port_operations pata_at91_port_ops = {
.sff_data_xfer = pata_at91_data_xfer_noirq,
.set_piomode = pata_at91_set_piomode,
.cable_detect = ata_cable_40wire,
.port_start = ATA_OP_NULL,
};
static int __devinit pata_at91_probe(struct platform_device *pdev)


@ -1450,8 +1450,6 @@ static struct ata_port_operations bfin_pata_ops = {
.port_start = bfin_port_start,
.port_stop = bfin_port_stop,
.mode_filter = ATA_OP_NULL, /* will be removed soon */
};
static struct ata_port_info bfin_port_info[] = {


@ -153,16 +153,12 @@ static int cmd640_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing;
int ret = ata_sff_port_start(ap);
if (ret < 0)
return ret;
timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
if (timing == NULL)
return -ENOMEM;
timing->last = -1; /* Force a load */
ap->private_data = timing;
return ret;
return 0;
}
static struct scsi_host_template cmd640_sht = {


@ -182,7 +182,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
static int hpt36x_cable_detect(struct ata_port *ap)


@ -282,7 +282,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
/**
@ -298,7 +298,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
/**


@ -335,7 +335,7 @@ static struct ata_port_operations pata_icside_port_ops = {
.postreset = pata_icside_postreset,
.post_internal_cmd = pata_icside_bmdma_stop,
.mode_filter = ATA_OP_NULL, /* will be removed soon */
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
static void __devinit


@ -739,7 +739,7 @@ static int it821x_port_start(struct ata_port *ap)
struct it821x_dev *itdev;
u8 conf;
int ret = ata_sff_port_start(ap);
int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;


@ -720,6 +720,8 @@ static int pata_macio_port_start(struct ata_port *ap)
if (priv->dma_table_cpu == NULL) {
dev_err(priv->dev, "Unable to allocate DMA command list\n");
ap->ioaddr.bmdma_addr = NULL;
ap->mwdma_mask = 0;
ap->udma_mask = 0;
}
return 0;
}


@ -265,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
struct ata_device *pair = ata_dev_pair(adev);
if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
return ata_bmdma_mode_filter(adev, mask);
return mask;
/* Check for slave of a Maxtor at UDMA6 */
ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
@ -274,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
/**


@ -249,7 +249,7 @@ static int pdc2026x_port_start(struct ata_port *ap)
u8 burst = ioread8(bmdma + 0x1f);
iowrite8(burst | 0x01, bmdma + 0x1f);
}
return ata_sff_port_start(ap);
return ata_bmdma_port_start(ap);
}
/**


@ -53,7 +53,6 @@ static struct ata_port_operations pata_platform_port_ops = {
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
.set_mode = pata_platform_set_mode,
.port_start = ATA_OP_NULL,
};
static void pata_platform_setup_port(struct ata_ioports *ioaddr,


@ -265,7 +265,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
/**
@ -892,7 +892,7 @@ static void scc_irq_clear (struct ata_port *ap)
* scc_port_start - Set port up for dma.
* @ap: Port to initialize
*
* Allocate space for PRD table using ata_port_start().
* Allocate space for PRD table using ata_bmdma_port_start().
* Set PRD table address for PTERADD. (PRD Transfer End Read)
*/
@ -901,7 +901,7 @@ static int scc_port_start (struct ata_port *ap)
void __iomem *mmio = ap->ioaddr.bmdma_addr;
int rc;
rc = ata_port_start(ap);
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;


@ -198,7 +198,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l
{
if (adev->class == ATA_DEV_ATA)
mask &= ~ATA_MASK_UDMA;
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
@ -218,7 +218,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
/* Disk, UDMA */
if (adev->class != ATA_DEV_ATA)
return ata_bmdma_mode_filter(adev, mask);
return mask;
/* Actually do need to check */
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
@ -227,7 +227,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
if (!strcmp(p, model_num))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return ata_bmdma_mode_filter(adev, mask);
return mask;
}
/**


@ -355,7 +355,7 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
mask &= ~ ATA_MASK_UDMA;
}
}
return ata_bmdma_mode_filter(dev, mask);
return mask;
}
/**
@ -424,7 +424,7 @@ static int via_port_start(struct ata_port *ap)
struct via_port *vp;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int ret = ata_sff_port_start(ap);
int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;


@ -556,11 +556,7 @@ static int adma_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct adma_port_priv *pp;
int rc;
rc = ata_port_start(ap);
if (rc)
return rc;
adma_enter_reg_mode(ap);
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)


@ -682,7 +682,6 @@ static int inic_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct inic_port_priv *pp;
int rc;
/* alloc and initialize private data */
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@ -691,10 +690,6 @@ static int inic_port_start(struct ata_port *ap)
ap->private_data = pp;
/* Alloc resources */
rc = ata_port_start(ap);
if (rc)
return rc;
pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
&pp->pkt_dma, GFP_KERNEL);
if (!pp->pkt)


@ -717,8 +717,6 @@ static struct ata_port_operations mv6_ops = {
.port_start = mv_port_start,
.port_stop = mv_port_stop,
.mode_filter = ATA_OP_NULL, /* will be removed soon */
};
static struct ata_port_operations mv_iie_ops = {


@ -1156,7 +1156,8 @@ static int nv_adma_port_start(struct ata_port *ap)
if (rc)
return rc;
rc = ata_port_start(ap);
/* we might fallback to bmdma, allocate bmdma resources */
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
@ -1985,7 +1986,8 @@ static int nv_swncq_port_start(struct ata_port *ap)
struct nv_swncq_port_priv *pp;
int rc;
rc = ata_port_start(ap);
/* we might fallback to bmdma, allocate bmdma resources */
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;


@ -333,7 +333,8 @@ static int pdc_common_port_start(struct ata_port *ap)
struct pdc_port_priv *pp;
int rc;
rc = ata_port_start(ap);
/* we use the same prd table as bmdma, allocate it */
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;


@ -504,11 +504,7 @@ static int qs_port_start(struct ata_port *ap)
void __iomem *mmio_base = qs_mmio_base(ap->host);
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
u64 addr;
int rc;
rc = ata_port_start(ap);
if (rc)
return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;


@ -302,11 +302,6 @@ static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
int rc;
rc = ata_port_start(ap);
if (rc)
return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)


@ -181,9 +181,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
rc = ata_pci_bmdma_init(host);
if (rc)
return rc;
ata_pci_bmdma_init(host);
iomap = host->iomap;


@ -1000,7 +1000,6 @@ extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
extern int ata_xfer_mode2shift(unsigned long xfer_mode);
extern const char *ata_mode_string(unsigned long xfer_mask);
extern unsigned long ata_id_xfermask(const u16 *id);
extern int ata_port_start(struct ata_port *ap);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
@ -1616,8 +1615,6 @@ extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes);
extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc);
extern void ata_sff_error_handler(struct ata_port *ap);
extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_sff_port_start(struct ata_port *ap);
extern int ata_sff_port_start32(struct ata_port *ap);
extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
extern int ata_pci_sff_init_host(struct ata_host *host);
@ -1632,16 +1629,16 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev,
struct scsi_host_template *sht, void *host_priv, int hflags);
#endif /* CONFIG_PCI */
extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev,
unsigned long xfer_mask);
extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
extern void ata_bmdma_start(struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern int ata_bmdma_port_start(struct ata_port *ap);
extern int ata_bmdma_port_start32(struct ata_port *ap);
#ifdef CONFIG_PCI
extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
extern int ata_pci_bmdma_init(struct ata_host *host);
extern void ata_pci_bmdma_init(struct ata_host *host);
#endif /* CONFIG_PCI */
/**