dect/linux-2.6 (archived)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
  dca: disable dca on IOAT ver.3.0 multiple-IOH platforms
  netpoll: Disable IRQ around RCU dereference in netpoll_rx
  sctp: Do not reset the packet during sctp_packet_config().
  net/llc: storing negative error codes in unsigned short
  MAINTAINERS: move atlx discussions to netdev
  drivers/net/cxgb3/cxgb3_main.c: prevent reading uninitialized stack memory
  drivers/net/eql.c: prevent reading uninitialized stack memory
  drivers/net/usb/hso.c: prevent reading uninitialized memory
  xfrm: dont assume rcu_read_lock in xfrm_output_one()
  r8169: Handle rxfifo errors on 8168 chips
  3c59x: Remove atomic context inside vortex_{set|get}_wol
  tcp: Prevent overzealous packetization by SWS logic.
  net: RPS needs to depend upon USE_GENERIC_SMP_HELPERS
  phylib: fix PAL state machine restart on resume
  net: use rcu_barrier() in rollback_registered_many
  bonding: correctly process non-linear skbs
  ipv4: enable getsockopt() for IP_NODEFRAG
  ipv4: force_igmp_version ignored when a IGMPv3 query received
  ppp: potential NULL dereference in ppp_mp_explode()
  net/llc: make opt unsigned in llc_ui_setsockopt()
  ...
Commit 7d7dee96e1 by Linus Torvalds, 2010-09-19 11:05:50 -07:00
22 changed files with 136 additions and 35 deletions

@@ -1135,7 +1135,7 @@ ATLX ETHERNET DRIVERS
 M:      Jay Cliburn <jcliburn@gmail.com>
 M:      Chris Snook <chris.snook@gmail.com>
 M:      Jie Yang <jie.yang@atheros.com>
-L:      atl1-devel@lists.sourceforge.net
+L:      netdev@vger.kernel.org
 W:      http://sourceforge.net/projects/atl1
 W:      http://atl1.sourceforge.net
 S:      Maintained

@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
 
 static LIST_HEAD(dca_domains);
 
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 {
         struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
         kfree(domain);
 }
 
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+        struct pci_dev *pdev = to_pci_dev(dev);
+
+        return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+                ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
+                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
+                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
+                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
+                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
+                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
+                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
+                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
+}
+
+static void unregister_dca_providers(void)
+{
+        struct dca_provider *dca, *_dca;
+        struct list_head unregistered_providers;
+        struct dca_domain *domain;
+        unsigned long flags;
+
+        blocking_notifier_call_chain(&dca_provider_chain,
+                                     DCA_PROVIDER_REMOVE, NULL);
+
+        INIT_LIST_HEAD(&unregistered_providers);
+
+        spin_lock_irqsave(&dca_lock, flags);
+
+        if (list_empty(&dca_domains)) {
+                spin_unlock_irqrestore(&dca_lock, flags);
+                return;
+        }
+
+        /* at this point only one domain in the list is expected */
+        domain = list_first_entry(&dca_domains, struct dca_domain, node);
+        if (!domain)
+                return;
+
+        list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
+                list_del(&dca->node);
+                list_add(&dca->node, &unregistered_providers);
+        }
+
+        dca_free_domain(domain);
+
+        spin_unlock_irqrestore(&dca_lock, flags);
+
+        list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+                dca_sysfs_remove_provider(dca);
+                list_del(&dca->node);
+        }
+}
+
 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
 {
         struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 
         domain = dca_find_domain(rc);
 
         if (!domain) {
-                domain = dca_allocate_domain(rc);
-                if (domain)
-                        list_add(&domain->node, &dca_domains);
+                if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+                        dca_providers_blocked = 1;
+                } else {
+                        domain = dca_allocate_domain(rc);
+                        if (domain)
+                                list_add(&domain->node, &dca_domains);
+                }
         }
 
         return domain;
@@ -293,8 +355,6 @@
 }
 EXPORT_SYMBOL_GPL(free_dca_provider);
 
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
 /**
  * register_dca_provider - register a dca provider
  * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
         unsigned long flags;
         struct dca_domain *domain;
 
+        spin_lock_irqsave(&dca_lock, flags);
+        if (dca_providers_blocked) {
+                spin_unlock_irqrestore(&dca_lock, flags);
+                return -ENODEV;
+        }
+        spin_unlock_irqrestore(&dca_lock, flags);
+
         err = dca_sysfs_add_provider(dca, dev);
         if (err)
                 return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
         spin_lock_irqsave(&dca_lock, flags);
         domain = dca_get_domain(dev);
         if (!domain) {
-                spin_unlock_irqrestore(&dca_lock, flags);
+                if (dca_providers_blocked) {
+                        spin_unlock_irqrestore(&dca_lock, flags);
+                        dca_sysfs_remove_provider(dca);
+                        unregister_dca_providers();
+                } else {
+                        spin_unlock_irqrestore(&dca_lock, flags);
+                }
                 return -ENODEV;
         }
         list_add(&dca->node, &domain->dca_providers);

@@ -635,6 +635,9 @@ struct vortex_private {
                 must_free_region:1,     /* Flag: if zero, Cardbus owns the I/O region */
                 large_frames:1,         /* accept large frames */
                 handling_irq:1;         /* private in_irq indicator */
+        /* {get|set}_wol operations are already serialized by rtnl.
+         * no additional locking is required for the enable_wol and acpi_set_WOL()
+         */
         int drv_flags;
         u16 status_enable;
         u16 intr_enable;
@@ -2939,13 +2942,11 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
         struct vortex_private *vp = netdev_priv(dev);
 
-        spin_lock_irq(&vp->lock);
         wol->supported = WAKE_MAGIC;
 
         wol->wolopts = 0;
         if (vp->enable_wol)
                 wol->wolopts |= WAKE_MAGIC;
-        spin_unlock_irq(&vp->lock);
 }
 
 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2954,13 +2955,11 @@ static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
         if (wol->wolopts & ~WAKE_MAGIC)
                 return -EINVAL;
 
-        spin_lock_irq(&vp->lock);
         if (wol->wolopts & WAKE_MAGIC)
                 vp->enable_wol = 1;
         else
                 vp->enable_wol = 0;
         acpi_set_WOL(dev);
-        spin_unlock_irq(&vp->lock);
 
         return 0;
 }

@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
         if (!(dev->flags & IFF_MASTER))
                 goto out;
 
+        if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+                goto out;
+
         read_lock(&bond->lock);
         slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
                                       orig_dev);

@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
                 goto out;
         }
 
+        if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
+                goto out;
+
         if (skb->len < sizeof(struct arp_pkt)) {
                 pr_debug("Packet is too small to be an ARP\n");
                 goto out;

@@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
         case CHELSIO_GET_QSET_NUM:{
                 struct ch_reg edata;
 
+                memset(&edata, 0, sizeof(struct ch_reg));
+
                 edata.cmd = CHELSIO_GET_QSET_NUM;
                 edata.val = pi->nqsets;
                 if (copy_to_user(useraddr, &edata, sizeof(edata)))

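The cxgb3 hunk above and the eql and hso hunks below apply the same hardening pattern: zero the on-stack structure before filling it in and copying it to user space, so that padding bytes and fields the handler never writes cannot leak kernel stack contents. A minimal user-space sketch of the idea (hypothetical struct and field names, not the kernel code; the printf stands in for copy_to_user()):

#include <stdio.h>
#include <string.h>

/* Hypothetical ioctl-style reply: only two fields are ever filled in,
 * so the rest would be copied out uninitialized without the memset. */
struct reply {
        unsigned int cmd;
        unsigned int val;
        unsigned int reserved[4];       /* never written by the handlers below */
};

static void fill_reply_leaky(struct reply *r)
{
        r->cmd = 1;                     /* 'reserved' keeps stale stack bytes */
        r->val = 42;
}

static void fill_reply_fixed(struct reply *r)
{
        memset(r, 0, sizeof(*r));       /* the one-line fix from these hunks */
        r->cmd = 1;
        r->val = 42;
}

int main(void)
{
        struct reply a, b;

        memset(&a, 0xAA, sizeof(a));    /* simulate stale data left on the stack */
        memset(&b, 0xAA, sizeof(b));

        fill_reply_leaky(&a);
        fill_reply_fixed(&b);

        printf("leaky reserved[0] = %#x\n", a.reserved[0]);    /* 0xaaaaaaaa */
        printf("fixed reserved[0] = %#x\n", b.reserved[0]);    /* 0 */
        return 0;
}
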
@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
         equalizer_t *eql;
         master_config_t mc;
 
+        memset(&mc, 0, sizeof(master_config_t));
+
         if (eql_is_master(dev)) {
                 eql = netdev_priv(dev);
                 mc.max_slaves = eql->max_slaves;

@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
          * may call phy routines that try to grab the same lock, and that may
          * lead to a deadlock.
          */
-        if (phydev->attached_dev)
+        if (phydev->attached_dev && phydev->adjust_link)
                 phy_stop_machine(phydev);
 
         if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
                 return ret;
 
 no_resume:
-        if (phydev->attached_dev)
+        if (phydev->attached_dev && phydev->adjust_link)
                 phy_start_machine(phydev, NULL);
 
         return 0;

@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
         hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
         i = 0;
         list_for_each_entry(pch, &ppp->channels, clist) {
-                navail += pch->avail = (pch->chan != NULL);
-                pch->speed = pch->chan->speed;
+                if (pch->chan) {
+                        pch->avail = 1;
+                        navail++;
+                        pch->speed = pch->chan->speed;
+                } else {
+                        pch->avail = 0;
+                }
                 if (pch->avail) {
                         if (skb_queue_empty(&pch->file.xq) ||
                             !pch->had_frag) {

@@ -2934,7 +2934,7 @@ static const struct rtl_cfg_info {
                 .hw_start = rtl_hw_start_8168,
                 .region = 2,
                 .align = 8,
-                .intr_event = SYSErr | LinkChg | RxOverflow |
+                .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
                               TxErr | TxOK | RxOK | RxErr,
                 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
                 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4625,8 +4625,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                 }
 
                 /* Work around for rx fifo overflow */
-                if (unlikely(status & RxFIFOOver) &&
-                    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+                if (unlikely(status & RxFIFOOver)) {
                         netif_stop_queue(dev);
                         rtl8169_tx_timeout(dev);
                         break;

@@ -1652,6 +1652,8 @@ static int hso_get_count(struct hso_serial *serial,
         struct uart_icount cnow;
         struct hso_tiocmget *tiocmget = serial->tiocmget;
 
+        memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
         if (!tiocmget)
                 return -ENOENT;
         spin_lock_irq(&serial->serial_lock);

@@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb)
         unsigned long flags;
         bool ret = false;
 
-        rcu_read_lock_bh();
+        local_irq_save(flags);
         npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
         if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
                 goto out;
 
-        spin_lock_irqsave(&npinfo->rx_lock, flags);
+        spin_lock(&npinfo->rx_lock);
         /* check rx_flags again with the lock held */
         if (npinfo->rx_flags && __netpoll_rx(skb))
                 ret = true;
-        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+        spin_unlock(&npinfo->rx_lock);
 
 out:
-        rcu_read_unlock_bh();
+        local_irq_restore(flags);
         return ret;
 }

@@ -475,8 +475,22 @@ extern unsigned int tcp_current_mss(struct sock *sk);
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 {
-        if (tp->max_window && pktsize > (tp->max_window >> 1))
-                return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+        int cutoff;
+
+        /* When peer uses tiny windows, there is no use in packetizing
+         * to sub-MSS pieces for the sake of SWS or making sure there
+         * are enough packets in the pipe for fast recovery.
+         *
+         * On the other hand, for extremely large MSS devices, handling
+         * smaller than MSS windows in this way does make sense.
+         */
+        if (tp->max_window >= 512)
+                cutoff = (tp->max_window >> 1);
+        else
+                cutoff = tp->max_window;
+
+        if (cutoff && pktsize > cutoff)
+                return max_t(int, cutoff, 68U - tp->tcp_header_len);
         else
                 return pktsize;
 }

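The comment in the hunk carries the reasoning: when the peer advertises a tiny window, chopping packets into sub-MSS pieces for SWS avoidance is counterproductive, so the half-window bound only applies once max_window reaches 512. A standalone re-implementation of the same arithmetic (plain C outside the kernel, with max_t() spelled out) shows the behaviour for a few window sizes:

#include <stdio.h>

/* Same logic as the new tcp_bound_to_half_wnd() above, lifted out of the
 * kernel so it can be compiled and run directly. */
static int bound_to_half_wnd(unsigned int max_window, unsigned int tcp_header_len,
                             int pktsize)
{
        int cutoff;

        if (max_window >= 512)
                cutoff = max_window >> 1;
        else
                cutoff = max_window;

        if (cutoff && pktsize > cutoff) {
                int floor = (int)(68U - tcp_header_len);        /* max_t(int, ...) */
                return cutoff > floor ? cutoff : floor;
        }
        return pktsize;
}

int main(void)
{
        /* Peer window of 300 bytes, 1460-byte packet: the old code returned
         * half the window (150); the new logic keeps the whole 300 bytes. */
        printf("window  300: %d\n", bound_to_half_wnd(300, 40, 1460));
        /* Window of 1000 bytes: large enough, so bound to half (500) as before. */
        printf("window 1000: %d\n", bound_to_half_wnd(1000, 40, 1460));
        /* Zero window: no cutoff, the packet size passes through unchanged. */
        printf("window    0: %d\n", bound_to_half_wnd(0, 40, 1460));
        return 0;
}
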
@@ -217,7 +217,7 @@ source "net/dns_resolver/Kconfig"
 
 config RPS
 	boolean
-	depends on SMP && SYSFS
+	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
 menu "Network testing"

@@ -4845,7 +4845,7 @@ static void rollback_registered_many(struct list_head *head)
         dev = list_first_entry(head, struct net_device, unreg_list);
         call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 
-        synchronize_net();
+        rcu_barrier();
 
         list_for_each_entry(dev, head, unreg_list)
                 dev_put(dev);

@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
         int mark = 0;
 
-        if (len == 8) {
+        if (len == 8 || IGMP_V2_SEEN(in_dev)) {
                 if (ih->code == 0) {
                         /* Alas, old v1 router presents here. */

@@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
         case IP_HDRINCL:
                 val = inet->hdrincl;
                 break;
+        case IP_NODEFRAG:
+                val = inet->nodefrag;
+                break;
         case IP_MTU_DISCOVER:
                 val = inet->pmtudisc;
                 break;

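With the new case above, the option value can be read back as well as set. A small user-space check (the fallback definition of IP_NODEFRAG uses the value 22 from linux/in.h; older libc headers may not expose the constant):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

#ifndef IP_NODEFRAG
#define IP_NODEFRAG 22  /* value from linux/in.h */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int val = -1;
        socklen_t len = sizeof(val);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        /* Before this patch the kernel had no getsockopt() case for
         * IP_NODEFRAG, so this call failed with ENOPROTOOPT even though
         * the option could be set. */
        if (getsockopt(fd, IPPROTO_IP, IP_NODEFRAG, &val, &len) < 0)
                perror("getsockopt(IP_NODEFRAG)");
        else
                printf("IP_NODEFRAG = %d\n", val);

        close(fd);
        return 0;
}
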
@@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
 {
         struct sock *sk = sock->sk;
         struct llc_sock *llc = llc_sk(sk);
-        int rc = -EINVAL, opt;
+        unsigned int opt;
+        int rc = -EINVAL;
 
         lock_sock(sk);
         if (unlikely(level != SOL_LLC || optlen != sizeof(int)))

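Making opt unsigned here closes a classic signed-comparison hole: a negative value passed by user space compares less than any positive upper bound, slips past the range checks, and is then truncated into the socket's unsigned fields. A self-contained illustration (the bound and the field are hypothetical, not the actual LLC limits):

#include <stdio.h>

#define OPT_MAX 127     /* hypothetical upper bound, standing in for the LLC limits */

struct sock_cfg {
        unsigned short retry;   /* unsigned field, like the members of llc_sock */
};

static int set_opt_signed(struct sock_cfg *cfg, int opt)
{
        if (opt > OPT_MAX)
                return -1;      /* -EINVAL in the kernel */
        cfg->retry = opt;       /* opt = -1 slips through and becomes 65535 */
        return 0;
}

static int set_opt_unsigned(struct sock_cfg *cfg, unsigned int opt)
{
        if (opt > OPT_MAX)      /* a "negative" user value is now huge and rejected */
                return -1;
        cfg->retry = opt;
        return 0;
}

int main(void)
{
        struct sock_cfg a = {0}, b = {0};
        int rc1 = set_opt_signed(&a, -1);
        int rc2 = set_opt_unsigned(&b, (unsigned int)-1);

        printf("signed:   rc=%d retry=%u\n", rc1, a.retry);     /* rc=0, retry=65535 */
        printf("unsigned: rc=%d retry=%u\n", rc2, b.retry);     /* rc=-1, retry=0 */
        return 0;
}
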
@@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb)
 
 int __init llc_station_init(void)
 {
-        u16 rc = -ENOBUFS;
+        int rc = -ENOBUFS;
         struct sk_buff *skb;
         struct llc_station_state_ev *ev;

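The one-character type change above matters because a negative errno stored in a u16 wraps to a large positive number: callers testing for rc < 0 never see the failure, and any error code propagated upward is garbage. A compilable illustration with ENOBUFS:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

/* Same shape as llc_station_init() before the fix: the error code is
 * parked in a 16-bit unsigned variable and later returned as int. */
static int init_with_u16(void)
{
        uint16_t rc = -ENOBUFS;         /* wraps to 65536 - ENOBUFS, a positive value */
        return rc;
}

static int init_with_int(void)
{
        int rc = -ENOBUFS;              /* stays negative, as callers expect */
        return rc;
}

int main(void)
{
        int a = init_with_u16();
        int b = init_with_int();

        printf("u16 rc: returned %d, (rc < 0) is %s\n", a, a < 0 ? "true" : "false");
        printf("int rc: returned %d, (rc < 0) is %s\n", b, b < 0 ? "true" : "false");
        return 0;
}
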
@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                         error = -EINVAL;
                         goto err_out;
                 }
-                if (!list_empty(&flow->list)) {
-                        error = -EEXIST;
-                        goto err_out;
-                }
         } else {
                 int i;
                 unsigned long cl;

@@ -92,7 +92,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
         SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
                           packet, vtag);
 
-        sctp_packet_reset(packet);
         packet->vtag = vtag;
 
         if (ecn_capable && sctp_packet_empty(packet)) {

@@ -101,7 +101,7 @@ resume:
                         err = -EHOSTUNREACH;
                         goto error_nolock;
                 }
-                skb_dst_set_noref(skb, dst);
+                skb_dst_set(skb, dst_clone(dst));
                 x = dst->xfrm;
         } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));