
qlge: do vlan cleanup

- unify vlan and nonvlan path
- kill qdev->vlgrp and qlge_vlan_rx_register
- allow rx/tx vlan accel to be turned on/off via ethtool (set_features)

Signed-off-by: Jiri Pirko <jpirko@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Jiri Pirko <jpirko@redhat.com>, 2011-07-21 03:24:11 +00:00
Committer: David S. Miller <davem@davemloft.net>
parent f1b553fbe7
commit 18c49b9177
2 changed files with 97 additions and 75 deletions
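
In brief: the old receive path branched on qdev->vlgrp and called the vlan_gro_*/vlan_hwaccel_* helpers; the new one tags the skb with __vlan_hwaccel_put_tag() and hands it to the regular napi_gro_receive()/netif_receive_skb() calls, while offload toggling moves to the ndo_fix_features/ndo_set_features hooks. A condensed sketch of that hook pattern, under the 2011-era u32 feature API (the mydrv_* names are placeholders, not part of the patch):

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	/* Hardware-specific knob; stands in for qlge_vlan_mode() in the patch below. */
	static void mydrv_vlan_mode(struct net_device *ndev, u32 features)
	{
		/* program the NIC's vlan-stripping register here */
	}

	/* Called by the core so the driver can massage a requested feature set. */
	static u32 mydrv_fix_features(struct net_device *ndev, u32 features)
	{
		/* rx/tx vlan accel cannot be toggled separately: tx must follow rx */
		if (features & NETIF_F_HW_VLAN_RX)
			features |= NETIF_F_HW_VLAN_TX;
		else
			features &= ~NETIF_F_HW_VLAN_TX;

		return features;
	}

	/* Called by the core when the (fixed-up) feature set is applied. */
	static int mydrv_set_features(struct net_device *ndev, u32 features)
	{
		u32 changed = ndev->features ^ features;

		/* touch the hardware only if the rx vlan accel bit actually flipped */
		if (changed & NETIF_F_HW_VLAN_RX)
			mydrv_vlan_mode(ndev, features);

		return 0;
	}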

diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h

@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
 
 /*
  * General definitions...
@@ -2052,7 +2053,7 @@ struct ql_adapter {
 	struct nic_stats nic_stats;
 
-	struct vlan_group *vlgrp;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
 	/* PCI Configuration information for this device */
 	struct pci_dev *pdev;
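
The struct change above trades the vlan_group pointer for a plain bitmap with one bit per possible VLAN ID; qlge_main.c below keeps it current with set_bit()/clear_bit() and replays it after a reset with for_each_set_bit(). A minimal sketch of that bookkeeping (mydrv_restore_vlan() and its pr_debug() body are placeholders for the hardware reprogramming):

	#include <linux/kernel.h>
	#include <linux/bitops.h>
	#include <linux/if_vlan.h>

	static unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];	/* 4096 bits */

	static void mydrv_restore_vlan(void)
	{
		u16 vid;

		/* visit only the VLAN IDs the stack has registered */
		for_each_set_bit(vid, active_vlans, VLAN_N_VID)
			pr_debug("re-adding vid %u\n", vid);	/* reprogram hw filter here */
	}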

diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c

@@ -7,6 +7,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/list.h>
@@ -33,6 +34,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/if_vlan.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/delay.h>
@@ -415,7 +417,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			      (qdev->
			       func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
-		if (qdev->vlgrp)
+		if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
 			cam_output |= CAM_OUT_RV;
 		/* route to NIC core */
 		ql_write32(qdev, MAC_ADDR_DATA, cam_output);
@@ -1507,10 +1509,9 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 	rx_ring->rx_bytes += length;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rx_ring->cq_id);
-	if (qdev->vlgrp && (vlan_id != 0xffff))
-		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
-	else
-		napi_gro_frags(napi);
+	if (vlan_id != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	napi_gro_frags(napi);
 }
 
 /* Process an inbound completion from an rx ring. */
@@ -1594,17 +1595,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 	}
 
 	skb_record_rx_queue(skb, rx_ring->cq_id);
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (qdev->vlgrp && (vlan_id != 0xffff))
-			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
-		else
-			napi_gro_receive(napi, skb);
-	} else {
-		if (qdev->vlgrp && (vlan_id != 0xffff))
-			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
-		else
-			netif_receive_skb(skb);
-	}
+	if (vlan_id != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(napi, skb);
+	else
+		netif_receive_skb(skb);
 	return;
 err_out:
 	dev_kfree_skb_any(skb);
@@ -1707,18 +1703,12 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	}
 
 	skb_record_rx_queue(skb, rx_ring->cq_id);
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (qdev->vlgrp && (vlan_id != 0xffff))
-			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
-					 vlan_id, skb);
-		else
-			napi_gro_receive(&rx_ring->napi, skb);
-	} else {
-		if (qdev->vlgrp && (vlan_id != 0xffff))
-			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
-		else
-			netif_receive_skb(skb);
-	}
+	if (vlan_id != 0xffff)
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(&rx_ring->napi, skb);
+	else
+		netif_receive_skb(skb);
 }
 
 static void ql_realign_skb(struct sk_buff *skb, int len)
@@ -2028,22 +2018,12 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
 	rx_ring->rx_packets++;
 	rx_ring->rx_bytes += skb->len;
 	skb_record_rx_queue(skb, rx_ring->cq_id);
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (qdev->vlgrp &&
-		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
-		    (vlan_id != 0))
-			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
-					 vlan_id, skb);
-		else
-			napi_gro_receive(&rx_ring->napi, skb);
-	} else {
-		if (qdev->vlgrp &&
-		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
-		    (vlan_id != 0))
-			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
-		else
-			netif_receive_skb(skb);
-	}
+	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+		__vlan_hwaccel_put_tag(skb, vlan_id);
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+		napi_gro_receive(&rx_ring->napi, skb);
+	else
+		netif_receive_skb(skb);
 }
 
 /* Process an inbound completion from an rx ring. */
@@ -2334,71 +2314,111 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+static void qlge_vlan_mode(struct net_device *ndev, u32 features)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
-	qdev->vlgrp = grp;
-	if (grp) {
-		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+	if (features & NETIF_F_HW_VLAN_RX) {
+		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
 			     "Turning on VLAN in NIC_RCV_CFG.\n");
 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
-				NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
 	} else {
-		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
 			     "Turning off VLAN in NIC_RCV_CFG.\n");
 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
 	}
 }
 
+static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+{
+	/*
+	 * Since there is no support for separate rx/tx vlan accel
+	 * enable/disable make sure tx flag is always in same state as rx.
+	 */
+	if (features & NETIF_F_HW_VLAN_RX)
+		features |= NETIF_F_HW_VLAN_TX;
+	else
+		features &= ~NETIF_F_HW_VLAN_TX;
+
+	return features;
+}
+
+static int qlge_set_features(struct net_device *ndev, u32 features)
+{
+	u32 changed = ndev->features ^ features;
+
+	if (changed & NETIF_F_HW_VLAN_RX)
+		qlge_vlan_mode(ndev, features);
+
+	return 0;
+}
+
+static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+{
+	u32 enable_bit = MAC_ADDR_E;
+
+	if (ql_set_mac_addr_reg
+	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Failed to init vlan address.\n");
+	}
+}
+
 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
-	u32 enable_bit = MAC_ADDR_E;
 	int status;
 
 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
 		return;
-	if (ql_set_mac_addr_reg
-	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
-		netif_err(qdev, ifup, qdev->ndev,
-			  "Failed to init vlan address.\n");
-	}
+
+	__qlge_vlan_rx_add_vid(qdev, vid);
+	set_bit(vid, qdev->active_vlans);
+
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
 
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
 {
-	struct ql_adapter *qdev = netdev_priv(ndev);
 	u32 enable_bit = 0;
-	int status;
-
-	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
-	if (status)
-		return;
 
 	if (ql_set_mac_addr_reg
 	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
 		netif_err(qdev, ifup, qdev->ndev,
 			  "Failed to clear vlan address.\n");
 	}
-	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
+
+static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+	int status;
+
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return;
+
+	__qlge_vlan_rx_kill_vid(qdev, vid);
+	clear_bit(vid, qdev->active_vlans);
+
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
 
 static void qlge_restore_vlan(struct ql_adapter *qdev)
 {
-	qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+	int status;
+	u16 vid;
 
-	if (qdev->vlgrp) {
-		u16 vid;
-		for (vid = 0; vid < VLAN_N_VID; vid++) {
-			if (!vlan_group_get_device(qdev->vlgrp, vid))
-				continue;
-			qlge_vlan_rx_add_vid(qdev->ndev, vid);
-		}
-	}
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return;
+
+	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
+		__qlge_vlan_rx_add_vid(qdev, vid);
+
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
 
 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
@@ -4661,7 +4681,8 @@ static const struct net_device_ops qlge_netdev_ops = {
 	.ndo_set_mac_address	= qlge_set_mac_address,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= qlge_tx_timeout,
-	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
+	.ndo_fix_features	= qlge_fix_features,
+	.ndo_set_features	= qlge_set_features,
 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
 };
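
Usage note: with these hooks in place the offload is controlled through ethtool's feature interface, e.g. "ethtool -K <iface> rxvlan off" (<iface> being a placeholder for the qlge port); qlge_fix_features() then forces txvlan into the same state, so the pair always toggles together.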