
tc35815: Whitespace cleanup

Cosmetic TAB/whitespace cleanups and some style cleanups.  No
functional changes.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Atsushi Nemoto 2008-04-11 00:25:31 +09:00 committed by Jeff Garzik
parent c6686fe3e4
commit 7f225b427b
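
The style rules applied here are the standard kernel CodingStyle conventions that show up throughout the hunks below: no space between a function name and its opening parenthesis, pointer declarations written as "type *name" rather than "type * name" or "type* name", spaces around binary operators such as "1 << (index)", assignments kept out of if () conditions and replaced by a plain "!ptr" test, and cuddled "} else {" braces. As a rough illustration only, the following standalone userspace sketch (hypothetical code, not taken from tc35815.c) shows what the cleaned-up form of these patterns looks like:

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative sketch only: a hypothetical userspace helper, not part of
 * the driver, showing the conventions this commit applies.
 */
static int *alloc_counters(unsigned int count)  /* "type *name", no space before '(' */
{
        int *buf;

        /* assignment kept out of the if () condition, then tested with !ptr */
        buf = calloc(count, sizeof(*buf));
        if (!buf)
                return NULL;

        buf[0] = 1 << 3;        /* spaces around binary operators, as in "1 << (index)" */
        return buf;
}

int main(void)
{
        int *counters = alloc_counters(4);

        if (counters) {
                printf("first counter: %d\n", counters[0]);
        } else {        /* cuddled "} else {" instead of "}" and "else {" on separate lines */
                fprintf(stderr, "allocation failed\n");
        }

        free(counters);
        return 0;
}

Essentially every hunk in the diff below is one of these mechanical transformations, which is why the patch has no functional effect.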


@@ -81,7 +81,7 @@ static const struct pci_device_id tc35815_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
{0,}
};
-MODULE_DEVICE_TABLE (pci, tc35815_pci_tbl);
+MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);
/* see MODULE_PARM_DESC */
static struct tc35815_options {
@@ -183,7 +183,7 @@ struct tc35815_regs {
/* CAM_Ena bit asign ------------------------------------------------------- */
#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
-#define CAM_Ena_Bit(index) (1<<(index))
+#define CAM_Ena_Bit(index) (1 << (index))
#define CAM_ENTRY_DESTINATION 0
#define CAM_ENTRY_SOURCE 1
#define CAM_ENTRY_MACCTL 20
@@ -249,7 +249,7 @@ struct tc35815_regs {
/* Int_En bit asign -------------------------------------------------------- */
#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
-#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Control Complete Enable */
+#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */
#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */
#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */
@@ -352,8 +352,10 @@ struct BDesc {
/* Tuning parameters */
#define DMA_BURST_SIZE 32
#define TX_THRESHOLD 1024
-#define TX_THRESHOLD_MAX 1536 /* used threshold with packet max byte for low pci transfer ability.*/
-#define TX_THRESHOLD_KEEP_LIMIT 10 /* setting threshold max value when overrun error occured this count. */
+/* used threshold with packet max byte for low pci transfer ability.*/
+#define TX_THRESHOLD_MAX 1536
+/* setting threshold max value when overrun error occured this count. */
+#define TX_THRESHOLD_KEEP_LIMIT 10
/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
#ifdef TC35815_USE_PACKEDBUFFER
@@ -441,7 +443,7 @@ struct tc35815_local {
* RX_BUF_NUM BD in Free Buffer FD.
* One Free Buffer BD has ETH_FRAME_LEN data buffer.
*/
-void * fd_buf; /* for TxFD, RxFD, FrFD */
+void *fd_buf; /* for TxFD, RxFD, FrFD */
dma_addr_t fd_buf_dma;
struct TxFD *tfd_base;
unsigned int tfd_start;
@@ -452,7 +454,7 @@ struct tc35815_local {
struct FrFD *fbl_ptr;
#ifdef TC35815_USE_PACKEDBUFFER
unsigned char fbl_curid;
-void * data_buf[RX_BUF_NUM]; /* packing */
+void *data_buf[RX_BUF_NUM]; /* packing */
dma_addr_t data_buf_dma[RX_BUF_NUM];
struct {
struct sk_buff *skb;
@@ -493,13 +495,14 @@ static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
}
#define TC35815_DMA_SYNC_ONDEMAND
-static void* alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
+static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
{
#ifdef TC35815_DMA_SYNC_ONDEMAND
void *buf;
/* pci_map + pci_dma_sync will be more effective than
* pci_alloc_consistent on some archs. */
-if ((buf = (void *)__get_free_page(GFP_ATOMIC)) == NULL)
+buf = (void *)__get_free_page(GFP_ATOMIC);
+if (!buf)
return NULL;
*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
@@ -825,7 +828,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
}
#endif
-static int __devinit tc35815_init_dev_addr (struct net_device *dev)
+static int __devinit tc35815_init_dev_addr(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
@@ -847,7 +850,7 @@ static int __devinit tc35815_init_dev_addr (struct net_device *dev)
return 0;
}
-static int __devinit tc35815_init_one (struct pci_dev *pdev,
+static int __devinit tc35815_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
void __iomem *ioaddr = NULL;
@@ -870,7 +873,7 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
}
/* dev zeroed in alloc_etherdev */
-dev = alloc_etherdev (sizeof (*lp));
+dev = alloc_etherdev(sizeof(*lp));
if (dev == NULL) {
dev_err(&pdev->dev, "unable to alloc new ethernet\n");
return -ENOMEM;
@@ -907,7 +910,7 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
#endif
dev->irq = pdev->irq;
-dev->base_addr = (unsigned long) ioaddr;
+dev->base_addr = (unsigned long)ioaddr;
INIT_WORK(&lp->restart_work, tc35815_restart_work);
spin_lock_init(&lp->lock);
@@ -926,7 +929,7 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
random_ether_addr(dev->dev_addr);
}
-rc = register_netdev (dev);
+rc = register_netdev(dev);
if (rc)
goto err_out;
@@ -947,23 +950,22 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
err_out_unregister:
unregister_netdev(dev);
err_out:
-free_netdev (dev);
+free_netdev(dev);
return rc;
}
-static void __devexit tc35815_remove_one (struct pci_dev *pdev)
+static void __devexit tc35815_remove_one(struct pci_dev *pdev)
{
-struct net_device *dev = pci_get_drvdata (pdev);
+struct net_device *dev = pci_get_drvdata(pdev);
struct tc35815_local *lp = netdev_priv(dev);
phy_disconnect(lp->phy_dev);
mdiobus_unregister(&lp->mii_bus);
kfree(lp->mii_bus.irq);
-unregister_netdev (dev);
-free_netdev (dev);
-pci_set_drvdata (pdev, NULL);
+unregister_netdev(dev);
+free_netdev(dev);
+pci_set_drvdata(pdev, NULL);
}
static int
@@ -980,11 +982,17 @@ tc35815_init_queues(struct net_device *dev)
sizeof(struct TxFD) * TX_FD_NUM >
PAGE_SIZE * FD_PAGE_NUM);
-if ((lp->fd_buf = pci_alloc_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, &lp->fd_buf_dma)) == 0)
+lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
+PAGE_SIZE * FD_PAGE_NUM,
+&lp->fd_buf_dma);
+if (!lp->fd_buf)
return -ENOMEM;
for (i = 0; i < RX_BUF_NUM; i++) {
#ifdef TC35815_USE_PACKEDBUFFER
-if ((lp->data_buf[i] = alloc_rxbuf_page(lp->pci_dev, &lp->data_buf_dma[i])) == NULL) {
+lp->data_buf[i] =
+alloc_rxbuf_page(lp->pci_dev,
+&lp->data_buf_dma[i]);
+if (!lp->data_buf[i]) {
while (--i >= 0) {
free_rxbuf_page(lp->pci_dev,
lp->data_buf[i],
@@ -1027,18 +1035,17 @@ tc35815_init_queues(struct net_device *dev)
#endif
printk("\n");
} else {
-for (i = 0; i < FD_PAGE_NUM; i++) {
-clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE));
-}
+for (i = 0; i < FD_PAGE_NUM; i++)
+clear_page((void *)((unsigned long)lp->fd_buf +
+i * PAGE_SIZE));
}
fd_addr = (unsigned long)lp->fd_buf;
/* Free Descriptors (for Receive) */
lp->rfd_base = (struct RxFD *)fd_addr;
fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
-for (i = 0; i < RX_FD_NUM; i++) {
+for (i = 0; i < RX_FD_NUM; i++)
lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
-}
lp->rfd_cur = lp->rfd_base;
lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
@@ -1366,9 +1373,9 @@ tc35815_open(struct net_device *dev)
* This is used if the interrupt line can turned off (shared).
* See 3c503.c for an example of selecting the IRQ at config-time.
*/
-if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, dev->name, dev)) {
+if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED,
+dev->name, dev))
return -EAGAIN;
-}
tc35815_chip_reset(dev);
@@ -2050,7 +2057,7 @@ tc35815_txdone(struct net_device *dev)
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
-struct TxFD* txhead = &lp->tfd_base[head];
+struct TxFD *txhead = &lp->tfd_base[head];
int qlen = (lp->tfd_start + TX_FD_NUM
- lp->tfd_end) % TX_FD_NUM;
@@ -2085,7 +2092,7 @@ tc35815_txdone(struct net_device *dev)
* condition, and space has now been made available,
* wake up the queue.
*/
-if (netif_queue_stopped(dev) && ! tc35815_tx_full(dev))
+if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
netif_wake_queue(dev);
}
@@ -2182,8 +2189,7 @@ tc35815_set_multicast_list(struct net_device *dev)
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
-if (dev->flags&IFF_PROMISC)
-{
+if (dev->flags & IFF_PROMISC) {
#ifdef WORKAROUND_100HALF_PROMISC
/* With some (all?) 100MHalf HUB, controller will hang
* if we enabled promiscuous mode before linkup... */
@@ -2194,16 +2200,13 @@ tc35815_set_multicast_list(struct net_device *dev)
#endif
/* Enable promiscuous mode */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
-}
-else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > CAM_ENTRY_MAX - 3)
-{
+} else if ((dev->flags & IFF_ALLMULTI) ||
+dev->mc_count > CAM_ENTRY_MAX - 3) {
/* CAM 0, 1, 20 are reserved. */
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
-}
-else if(dev->mc_count)
-{
-struct dev_mc_list* cur_addr = dev->mc_list;
+} else if (dev->mc_count) {
+struct dev_mc_list *cur_addr = dev->mc_list;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
@@ -2218,8 +2221,7 @@ tc35815_set_multicast_list(struct net_device *dev)
}
tc_writel(ena_bits, &tr->CAM_Ena);
tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
-}
-else {
+} else {
tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
}