dect/linux-2.6

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

Simon Horman 2008-08-19 17:36:22 +10:00
commit 3f087668c4
49 changed files with 417 additions and 300 deletions

View File

@ -363,6 +363,11 @@ This rule exists because users of the rfkill subsystem expect to get (and set,
when possible) the overall transmitter rfkill state, not that of a particular
rfkill line.
5. During suspend, the rfkill class will attempt to soft-block the radio
through a call to rfkill->toggle_radio, and will try to restore its previous
state during resume. After a rfkill class is suspended, it will *not* call
rfkill->toggle_radio until it is resumed.
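A minimal sketch (not from this patch set) of the toggle_radio contract
described in item 5, for a hypothetical driver; all foo_* names are invented
for illustration:

        static int foo_toggle_radio(void *data, enum rfkill_state state)
        {
                struct foo_priv *priv = data;   /* hypothetical driver state */

                switch (state) {
                case RFKILL_STATE_SOFT_BLOCKED:
                        foo_disable_tx(priv);   /* hypothetical: radio off */
                        return 0;
                case RFKILL_STATE_UNBLOCKED:
                        foo_enable_tx(priv);    /* hypothetical: radio on */
                        return 0;
                default:
                        return -EINVAL;
                }
        }

During suspend the rfkill class invokes this callback with
RFKILL_STATE_SOFT_BLOCKED; on resume it replays the pre-suspend state through
the same callback.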
Example of a WLAN wireless driver connected to the rfkill subsystem:
--------------------------------------------------------------------

View File

@ -1571,6 +1571,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
return half_md4_transform(hash, keyptr->secret);
}
EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,

View File

@ -35,8 +35,8 @@
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>

View File

@ -64,68 +64,6 @@ struct pcpu_lstats {
unsigned long bytes;
};
/* KISS: just allocate small chunks and copy bits.
*
* So, in fact, this is documentation, explaining what we expect
* of largesending device modulo TCP checksum, which is ignored for loopback.
*/
#ifdef LOOPBACK_TSO
static void emulate_large_send_offload(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
(iph->ihl * 4));
unsigned int doffset = (iph->ihl + th->doff) * 4;
unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
unsigned int offset = 0;
u32 seq = ntohl(th->seq);
u16 id = ntohs(iph->id);
while (offset + doffset < skb->len) {
unsigned int frag_size = min(mtu, skb->len - offset) - doffset;
struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC);
if (!nskb)
break;
skb_reserve(nskb, 32);
skb_set_mac_header(nskb, -ETH_HLEN);
skb_reset_network_header(nskb);
iph = ip_hdr(nskb);
skb_copy_to_linear_data(nskb, skb_network_header(skb),
doffset);
if (skb_copy_bits(skb,
doffset + offset,
nskb->data + doffset,
frag_size))
BUG();
skb_put(nskb, doffset + frag_size);
nskb->ip_summed = CHECKSUM_UNNECESSARY;
nskb->dev = skb->dev;
nskb->priority = skb->priority;
nskb->protocol = skb->protocol;
nskb->dst = dst_clone(skb->dst);
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
nskb->pkt_type = skb->pkt_type;
th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
iph->tot_len = htons(frag_size + doffset);
iph->id = htons(id);
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
th->seq = htonl(seq);
if (offset + doffset + frag_size < skb->len)
th->fin = th->psh = 0;
netif_rx(nskb);
offset += frag_size;
seq += frag_size;
id++;
}
dev_kfree_skb(skb);
}
#endif /* LOOPBACK_TSO */
/*
* The higher levels take care of making this non-reentrant (it's
* called with bh's disabled).
@ -137,9 +75,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
skb->protocol = eth_type_trans(skb,dev);
#ifndef LOOPBACK_MUST_CHECKSUM
skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif
#ifdef LOOPBACK_TSO
if (skb_is_gso(skb)) {
@ -234,9 +169,7 @@ static void loopback_setup(struct net_device *dev)
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
#ifdef LOOPBACK_TSO
| NETIF_F_TSO
#endif
| NETIF_F_NO_CSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX

View File

@ -358,6 +358,66 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
return mask;
}
/* prepad is the amount to reserve at front. len is length after that.
* linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear,
gfp_t gfp)
{
struct sk_buff *skb;
unsigned int i;
skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN);
if (skb) {
skb_reserve(skb, prepad);
skb_put(skb, len);
return skb;
}
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE)
return NULL;
/* Start with a normal skb, and add pages. */
skb = alloc_skb(prepad + linear, gfp);
if (!skb)
return NULL;
skb_reserve(skb, prepad);
skb_put(skb, linear);
len -= linear;
for (i = 0; i < MAX_SKB_FRAGS; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
f->page = alloc_page(gfp|__GFP_ZERO);
if (!f->page)
break;
f->page_offset = 0;
f->size = PAGE_SIZE;
skb->data_len += PAGE_SIZE;
skb->len += PAGE_SIZE;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
if (len < PAGE_SIZE) {
len = 0;
break;
}
len -= PAGE_SIZE;
}
/* Too large, or alloc fail? */
if (unlikely(len)) {
kfree_skb(skb);
skb = NULL;
}
return skb;
}
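In short: a packet that fits in one allocation gets a plain linear skb; if
that fails and prepad + len is at least a page, the function falls back to a
linear area sized by the caller's hint plus zeroed page fragments; any length
still left over after MAX_SKB_FRAGS pages means the packet cannot be
represented, so the partially built skb is freed and NULL is returned.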
/* Get packet from user space buffer */
static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
{
@ -391,14 +451,12 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
return -EINVAL;
}
if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) {
tun->dev->stats.rx_dropped++;
return -ENOMEM;
}
if (align)
skb_reserve(skb, align);
if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
tun->dev->stats.rx_dropped++;
kfree_skb(skb);
return -EFAULT;
@ -748,6 +806,36 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
}
static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
struct tun_struct *tun = file->private_data;
if (!tun)
return -EBADFD;
DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
strcpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = 0;
if (ifr->ifr_flags & TUN_TUN_DEV)
ifr->ifr_flags |= IFF_TUN;
else
ifr->ifr_flags |= IFF_TAP;
if (tun->flags & TUN_NO_PI)
ifr->ifr_flags |= IFF_NO_PI;
if (tun->flags & TUN_ONE_QUEUE)
ifr->ifr_flags |= IFF_ONE_QUEUE;
if (tun->flags & TUN_VNET_HDR)
ifr->ifr_flags |= IFF_VNET_HDR;
return 0;
}
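Note: the type test here reads ifr->ifr_flags immediately after zeroing it,
so the IFF_TUN branch can never be taken and the type bit always reports as
IFF_TAP; presumably tun->flags was the intended operand.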
/* This is like a cut-down ethtool ops, except done via tun fd so no
* privs required. */
static int set_offload(struct net_device *dev, unsigned long arg)
@ -833,6 +921,15 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
switch (cmd) {
case TUNGETIFF:
ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
if (ret)
return ret;
if (copy_to_user(argp, &ifr, sizeof(ifr)))
return -EFAULT;
break;
case TUNSETNOCSUM:
/* Disable/Enable checksum */
if (arg)

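A hedged userspace sketch of the new TUNGETIFF ioctl in use. It assumes
headers that carry the define added to if_tun.h below, and CAP_NET_ADMIN for
the TUNSETIFF step; error handling is trimmed to the minimum:

        #include <stdio.h>
        #include <string.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/if.h>
        #include <linux/if_tun.h>

        int main(void)
        {
                struct ifreq ifr;
                int fd = open("/dev/net/tun", O_RDWR);

                if (fd < 0)
                        return 1;

                /* create/attach a tun device first */
                memset(&ifr, 0, sizeof(ifr));
                ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
                if (ioctl(fd, TUNSETIFF, &ifr) < 0)
                        return 1;

                /* then query its name and flags back */
                memset(&ifr, 0, sizeof(ifr));
                if (ioctl(fd, TUNGETIFF, &ifr) < 0)
                        return 1;

                printf("%s: flags 0x%x\n", ifr.ifr_name, ifr.ifr_flags);
                close(fd);
                return 0;
        }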
View File

@ -40,7 +40,6 @@
*
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
@ -587,7 +586,6 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
ath5k_stop_hw(sc);
free_irq(pdev->irq, sc);
pci_disable_msi(pdev);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
@ -616,12 +614,10 @@ ath5k_pci_resume(struct pci_dev *pdev)
*/
pci_write_config_byte(pdev, 0x41, 0);
pci_enable_msi(pdev);
err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
if (err) {
ATH5K_ERR(sc, "request_irq failed\n");
goto err_msi;
goto err_no_irq;
}
err = ath5k_init(sc);
@ -642,8 +638,7 @@ ath5k_pci_resume(struct pci_dev *pdev)
return 0;
err_irq:
free_irq(pdev->irq, sc);
err_msi:
pci_disable_msi(pdev);
err_no_irq:
pci_disable_device(pdev);
return err;
}

View File

@ -5017,7 +5017,11 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
if ((abs(cur_vit_mask - bin)) < 75)
/* workaround for gcc bug #37014 */
volatile int tmp = abs(cur_vit_mask - bin);
if (tmp < 75)
mask_amt = 1;
else
mask_amt = 0;
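The volatile temporary exists only to dodge gcc bug #37014, which mishandled
the original abs()-in-conditional expression on affected compiler versions;
routing the intermediate value through a volatile object forces it to be
materialized before the comparison.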

View File

@ -33,7 +33,6 @@
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/firmware.h>
#include <linux/wireless.h>
#include <linux/workqueue.h>
@ -4615,7 +4614,9 @@ static void b43_sprom_fixup(struct ssb_bus *bus)
if (bus->bustype == SSB_BUSTYPE_PCI) {
pdev = bus->host_pci;
if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) ||
IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) ||
IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) ||
IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) ||
IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013))
bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST;
}

View File

@ -157,7 +157,6 @@ that only one external action is invoked at a time.
#include <linux/stringify.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/time.h>
#include <linux/firmware.h>
#include <linux/acpi.h>

View File

@ -31,7 +31,6 @@
******************************************************************************/
#include "ipw2200.h"
#include <linux/version.h>
#ifndef KBUILD_EXTMOD

View File

@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

View File

@ -26,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@ -967,7 +966,7 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
s = iwl4965_get_sub_band(priv, channel);
if (s >= EEPROM_TX_POWER_BANDS) {
IWL_ERROR("Tx Power can not find channel %d ", channel);
IWL_ERROR("Tx Power can not find channel %d\n", channel);
return -1;
}

View File

@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

View File

@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

View File

@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <net/mac80211.h>
struct iwl_priv; /* FIXME: remove */

View File

@ -63,7 +63,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <net/mac80211.h>
@ -146,7 +145,7 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
{
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
return -ENOENT;
}
return 0;
@ -227,7 +226,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
if (ret < 0) {
IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
goto err;
}
@ -254,7 +253,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
}
if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
IWL_ERROR("Time out reading EEPROM[%d]", addr);
IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
ret = -ETIMEDOUT;
goto done;
}

View File

@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <net/mac80211.h>
#include "iwl-dev.h" /* FIXME: remove */

View File

@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <net/mac80211.h>

View File

@ -207,7 +207,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
case WLAN_HT_CAP_MIMO_PS_DISABLED:
break;
default:
IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode);
break;
}
@ -969,7 +969,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
return priv->hw_params.bcast_sta_id;
default:
IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
return priv->hw_params.bcast_sta_id;
}
}

View File

@ -493,7 +493,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
/* Alloc keep-warm buffer */
ret = iwl_kw_alloc(priv);
if (ret) {
IWL_ERROR("Keep Warm allocation failed");
IWL_ERROR("Keep Warm allocation failed\n");
goto error_kw;
}
spin_lock_irqsave(&priv->lock, flags);
@ -1463,7 +1463,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
if (scd_flow >= priv->hw_params.max_txq_num) {
IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
return;
}

View File

@ -29,7 +29,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@ -1558,7 +1557,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
return -ENOENT;
}
@ -1583,7 +1582,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
}
if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
IWL_ERROR("Time out reading EEPROM[%d]", addr);
IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
return -ETIMEDOUT;
}
e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
@ -2507,7 +2506,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
return priv->hw_setting.bcast_sta_id;
default:
IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
return priv->hw_setting.bcast_sta_id;
}
}

View File

@ -413,12 +413,12 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
last_addr = range->end_addr;
__skb_unlink(entry, &priv->tx_queue);
memset(&info->status, 0, sizeof(info->status));
priv->tx_stats[skb_get_queue_mapping(skb)].len--;
entry_hdr = (struct p54_control_hdr *) entry->data;
entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
pad = entry_data->align[0];
priv->tx_stats[entry_data->hw_queue - 4].len--;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
if (!(payload->status & 0x01))
info->flags |= IEEE80211_TX_STAT_ACK;
@ -557,6 +557,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_tx_control_allocdata *txhdr;
size_t padding, len;
u8 rate;
u8 cts_rate = 0x20;
current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)];
if (unlikely(current_queue->len > current_queue->limit))
@ -581,28 +582,28 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1);
hdr->retry1 = hdr->retry2 = info->control.retry_limit;
memset(txhdr->wep_key, 0x0, 16);
txhdr->padding = 0;
txhdr->padding2 = 0;
/* TODO: add support for alternate retry TX rates */
rate = ieee80211_get_tx_rate(dev, info)->hw_value;
if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) {
rate |= 0x10;
if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
cts_rate |= 0x10;
}
if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
rate |= 0x40;
else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value;
} else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
rate |= 0x20;
cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value;
}
memset(txhdr->rateset, rate, 8);
txhdr->wep_key_present = 0;
txhdr->wep_key_len = 0;
txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4);
txhdr->magic4 = 0;
txhdr->antenna = (info->antenna_sel_tx == 0) ?
txhdr->key_type = 0;
txhdr->key_len = 0;
txhdr->hw_queue = skb_get_queue_mapping(skb) + 4;
txhdr->tx_antenna = (info->antenna_sel_tx == 0) ?
2 : info->antenna_sel_tx - 1;
txhdr->output_power = 0x7f; // HW Maximum
txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23));
txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
0 : cts_rate;
if (padding)
txhdr->align[0] = padding;
@ -836,10 +837,21 @@ static int p54_start(struct ieee80211_hw *dev)
struct p54_common *priv = dev->priv;
int err;
if (!priv->cached_vdcf) {
priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf)+
priv->tx_hdr_len + sizeof(struct p54_control_hdr),
GFP_KERNEL);
if (!priv->cached_vdcf)
return -ENOMEM;
}
err = priv->open(dev);
if (!err)
priv->mode = IEEE80211_IF_TYPE_MNTR;
p54_init_vdcf(dev);
return err;
}
@ -1019,15 +1031,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
sizeof(struct p54_tx_control_allocdata);
priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) +
priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL);
if (!priv->cached_vdcf) {
ieee80211_free_hw(dev);
return NULL;
}
p54_init_vdcf(dev);
mutex_init(&priv->conf_mutex);
return dev;

View File

@ -183,16 +183,16 @@ struct p54_frame_sent_hdr {
struct p54_tx_control_allocdata {
u8 rateset[8];
u16 padding;
u8 wep_key_present;
u8 wep_key_len;
u8 wep_key[16];
__le32 frame_type;
u32 padding2;
__le16 magic4;
u8 antenna;
u8 unalloc0[2];
u8 key_type;
u8 key_len;
u8 key[16];
u8 hw_queue;
u8 unalloc1[9];
u8 tx_antenna;
u8 output_power;
__le32 magic5;
u8 cts_rate;
u8 unalloc2[3];
u8 align[0];
} __attribute__ ((packed));
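The reverse-engineered fields acquire their real meanings here: the wep_*
trio becomes the generic key_type/key_len/key, frame_type turns out to be the
hardware queue index, antenna becomes tx_antenna, and part of the old magic5
word is the CTS rate that the TX path above now fills in.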

View File

@ -109,7 +109,17 @@ static void p54u_rx_cb(struct urb *urb)
urb->context = skb;
skb_queue_tail(&priv->rx_queue, skb);
} else {
if (!priv->hw_type)
skb_push(skb, sizeof(struct net2280_tx_hdr));
skb_reset_tail_pointer(skb);
skb_trim(skb, 0);
if (urb->transfer_buffer != skb_tail_pointer(skb)) {
/* this should not happen */
WARN_ON(1);
urb->transfer_buffer = skb_tail_pointer(skb);
}
skb_queue_tail(&priv->rx_queue, skb);
}

View File

@ -173,10 +173,10 @@ struct rxdone_entry_desc {
* frame transmission failed due to excessive retries.
*/
enum txdone_entry_desc_flags {
TXDONE_UNKNOWN = 1 << 0,
TXDONE_SUCCESS = 1 << 1,
TXDONE_FAILURE = 1 << 2,
TXDONE_EXCESSIVE_RETRY = 1 << 3,
TXDONE_UNKNOWN,
TXDONE_SUCCESS,
TXDONE_FAILURE,
TXDONE_EXCESSIVE_RETRY,
};
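These flags are manipulated with __set_bit()/test_bit() (see the USB txdone
handler below), which take bit numbers, so mask-style values were misleading.
A small sketch of the difference, using only the enum above:

        unsigned long flags = 0;

        /* TXDONE_SUCCESS is now plain bit number 1 */
        __set_bit(TXDONE_SUCCESS, &flags);

        /* under the old definition TXDONE_SUCCESS == (1 << 1) == 2,
         * and __set_bit() would have set bit 2 instead of bit 1 */
        WARN_ON(!test_bit(TXDONE_SUCCESS, &flags));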
/**

View File

@ -181,6 +181,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* (Only indirectly by looking at the failed TX counters
* in the register).
*/
txdesc.flags = 0;
if (!urb->status)
__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
else

View File

@ -40,6 +40,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
/* Netgear */
{USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0846, 0x4260), .driver_info = DEVICE_RTL8187B},
/* HP */
{USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
/* Sitecom */

View File

@ -1165,15 +1165,19 @@ EXPORT_SYMBOL(ssb_dma_translation);
int ssb_dma_set_mask(struct ssb_device *dev, u64 mask)
{
#ifdef CONFIG_SSB_PCIHOST
int err;
#endif
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
#ifdef CONFIG_SSB_PCIHOST
err = pci_set_dma_mask(dev->bus->host_pci, mask);
if (err)
return err;
err = pci_set_consistent_dma_mask(dev->bus->host_pci, mask);
return err;
#endif
case SSB_BUSTYPE_SSB:
return dma_set_mask(dev->dev, mask);
default:
@ -1188,6 +1192,7 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
#ifdef CONFIG_SSB_PCIHOST
if (gfp_flags & GFP_DMA) {
/* Workaround: The PCI API does not support passing
* a GFP flag. */
@ -1195,6 +1200,7 @@ void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
size, dma_handle, gfp_flags);
}
return pci_alloc_consistent(dev->bus->host_pci, size, dma_handle);
#endif
case SSB_BUSTYPE_SSB:
return dma_alloc_coherent(dev->dev, size, dma_handle, gfp_flags);
default:
@ -1210,6 +1216,7 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
#ifdef CONFIG_SSB_PCIHOST
if (gfp_flags & GFP_DMA) {
/* Workaround: The PCI API does not support passing
* a GFP flag. */
@ -1220,6 +1227,7 @@ void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
pci_free_consistent(dev->bus->host_pci, size,
vaddr, dma_handle);
return;
#endif
case SSB_BUSTYPE_SSB:
dma_free_coherent(dev->dev, size, vaddr, dma_handle);
return;

View File

@ -45,6 +45,7 @@
#define TUNGETFEATURES _IOR('T', 207, unsigned int)
#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
#define TUNSETTXFILTER _IOW('T', 209, unsigned int)
#define TUNGETIFF _IOR('T', 210, unsigned int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001

View File

@ -1452,6 +1452,10 @@ extern int skb_copy_datagram_iovec(const struct sk_buff *from,
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen,
struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
int offset,
struct iovec *from,
int len);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
unsigned int flags);

View File

@ -708,10 +708,7 @@ enum ieee80211_tkip_key_type {
* rely on the host system for such buffering. This option is used
* to configure the IEEE 802.11 upper layer to buffer broadcast and
* multicast frames when there are power saving stations so that
* the driver can fetch them with ieee80211_get_buffered_bc(). Note
* that not setting this flag works properly only when the
* %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is also not set because
* otherwise the stack will not know when the DTIM beacon was sent.
* the driver can fetch them with ieee80211_get_buffered_bc().
*
* @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE:
* Hardware is not capable of short slot operation on the 2.4 GHz band.
@ -1099,10 +1096,8 @@ enum ieee80211_ampdu_mlme_action {
* See the section "Frame filtering" for more information.
* This callback must be implemented and atomic.
*
* @set_tim: Set TIM bit. If the hardware/firmware takes care of beacon
* generation (that is, %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is set)
* mac80211 calls this function when a TIM bit must be set or cleared
* for a given AID. Must be atomic.
* @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
* must be set or cleared for a given AID. Must be atomic.
*
* @set_key: See the section "Hardware crypto acceleration"
* This callback can sleep, and is only called between add_interface

View File

@ -27,6 +27,7 @@ enum qdisc_state_t
{
__QDISC_STATE_RUNNING,
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
};
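The new __QDISC_STATE_DEACTIVATED bit is set in dev_deactivate_queue() and
cleared in transition_one_qdisc() (see the sch_generic.c hunks below);
dev_queue_xmit() and ing_filter() test it before enqueueing, so packets can
no longer be fed to a qdisc that is being torn down.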
struct qdisc_size_table {
@ -60,7 +61,6 @@ struct Qdisc
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
struct rcu_head q_rcu;
int (*reshape_fail)(struct sk_buff *skb,
struct Qdisc *q);

View File

@ -148,11 +148,16 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
}
static struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
.set_sg = br_set_sg,
.set_tx_csum = br_set_tx_csum,
.set_tso = br_set_tso,
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = br_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = br_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = br_set_tso,
.get_ufo = ethtool_op_get_ufo,
.get_flags = ethtool_op_get_flags,
};
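Besides the reordering, the table gains the stock ethtool_op getters for
tx-checksum, scatter-gather, TSO, UFO and flags, so ethtool can now query
those settings on bridge devices rather than only set them.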
void br_dev_setup(struct net_device *dev)

View File

@ -339,6 +339,93 @@ fault:
return -EFAULT;
}
/**
* skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying to
* @from: io vector to copy from
* @len: amount of data to copy to buffer from iovec
*
* Returns 0 or -EFAULT.
* Note: the iovec is modified during the copy.
*/
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
struct iovec *from, int len)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
if (memcpy_fromiovec(skb->data + offset, from, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
}
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = frag->page;
if (copy > len)
copy = len;
vaddr = kmap(page);
err = memcpy_fromiovec(vaddr + frag->page_offset +
offset - start, from, copy);
kunmap(page);
if (err)
goto fault;
if (!(len -= copy))
return 0;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
if (skb_copy_datagram_from_iovec(list,
offset - start,
from, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
}
start = end;
}
}
if (!len)
return 0;
fault:
return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
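A minimal sketch of the call pattern, mirroring the tun driver conversion
above (the skb has already been sized to hold len bytes):

        if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
                kfree_skb(skb);
                return -EFAULT; /* note: iv may be partially consumed */
        }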
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)

View File

@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
}
static inline void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
unsigned long flags;
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
q->next_sched = sd->output_queue;
sd->output_queue = q;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
void __netif_schedule(struct Qdisc *q)
{
if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
struct softnet_data *sd;
unsigned long flags;
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
q->next_sched = sd->output_queue;
sd->output_queue = q;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
}
if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
@ -1800,9 +1804,13 @@ gso:
spin_lock(root_lock);
rc = qdisc_enqueue_root(skb, q);
qdisc_run(q);
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
kfree_skb(skb);
rc = NET_XMIT_DROP;
} else {
rc = qdisc_enqueue_root(skb, q);
qdisc_run(q);
}
spin_unlock(root_lock);
goto out;
@ -1974,15 +1982,15 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED, &q->state);
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
smp_mb__before_clear_bit();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
__netif_schedule(q);
__netif_reschedule(q);
}
}
}
@ -2084,7 +2092,8 @@ static int ing_filter(struct sk_buff *skb)
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
result = qdisc_enqueue_root(skb, q);
if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}

View File

@ -2256,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
segs = nskb;
tail = nskb;
nskb->dev = skb->dev;
skb_copy_queue_mapping(nskb, skb);
nskb->priority = skb->priority;
nskb->protocol = skb->protocol;
nskb->vlan_tci = skb->vlan_tci;
nskb->dst = dst_clone(skb->dst);
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
nskb->pkt_type = skb->pkt_type;
__copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
skb_reserve(nskb, headroom);
@ -2274,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
doffset);
if (!sg) {
nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(skb, offset,
skb_put(nskb, len),
len, 0);
@ -2283,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
frag = skb_shinfo(nskb)->frags;
k = 0;
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum = skb->csum;
skb_copy_from_linear_data_offset(skb, offset,
skb_put(nskb, hsize), hsize);

View File

@ -411,12 +411,6 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
struct dccp_sock *dp = dccp_sk(sk);
long tstamp = dccp_timestamp();
/* Stop the REQUEST timer */
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
WARN_ON(sk->sk_send_head == NULL);
__kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dp->dccps_awl, dp->dccps_awh)) {
dccp_pr_debug("invalid ackno: S.AWL=%llu, "
@ -441,6 +435,12 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
DCCP_ACKVEC_STATE_RECEIVED))
goto out_invalid_packet; /* FIXME: change error code */
/* Stop the REQUEST timer */
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
WARN_ON(sk->sk_send_head == NULL);
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_update_gsr(sk, dp->dccps_isr);
/*
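The net effect: the REQUEST timer is stopped (and sk_send_head freed, now
with kfree_skb rather than __kfree_skb) only after the ack number and ack
vector have been validated, so a Response that fails validation no longer
destroys the retransmit state.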

View File

@ -70,7 +70,7 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct net_device *in,
(info->flags & IPT_ADDRTYPE_INVERT_SOURCE);
if (ret && info->dest)
ret &= match_type(dev, iph->daddr, info->dest) ^
(info->flags & IPT_ADDRTYPE_INVERT_DEST);
!!(info->flags & IPT_ADDRTYPE_INVERT_DEST);
return ret;
}
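The double negation matters because match_type() returns a 0/1 boolean while
the masked flag test yields the raw bit value, and XOR only inverts correctly
when both operands are normalized. A standalone illustration (the flag value
0x8 is invented for the example):

        bool matched = true;            /* as returned by match_type() */

        /* old: 1 ^ 0x8 == 0x9, still truthy -- the inversion is lost */
        bool broken = matched ^ 0x8;

        /* new: 1 ^ !!0x8 == 1 ^ 1 == 0 -- correctly inverted */
        bool fixed = matched ^ !!0x8;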

View File

@ -73,9 +73,13 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
range_size = ntohs(range->max.all) - min + 1;
}
off = *rover;
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
off = net_random();
off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
maniptype == IP_NAT_MANIP_SRC
? tuple->dst.u.all
: tuple->src.u.all);
else
off = *rover;
for (i = 0; i < range_size; i++, off++) {
*portptr = htons(min + off % range_size);

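This is what the EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral) hunk near the
top enables: with IP_NAT_RANGE_PROTO_RANDOM set, the port search now starts
from a keyed hash of the connection's addresses and port rather than from
net_random(), so the starting point is hard to predict from outside yet
deterministic for a given tuple and secret.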
View File

@ -911,7 +911,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
} else {
if (np->rxopt.bits.rxinfo) {
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if;
ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr);
put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
@ -921,7 +921,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
if (np->rxopt.bits.rxoinfo) {
struct in6_pktinfo src_info;
src_info.ipi6_ifindex = np->mcast_oif;
src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if;
ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr);
put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
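Both PKTINFO variants now fall back to sk_bound_dev_if when no multicast
output interface has been set, so a bound, connected socket reports a usable
ifindex instead of 0.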

View File

@ -2103,6 +2103,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
return;
}
/* update new sta with its last rx activity */
sta->last_rx = jiffies;
}
/*

View File

@ -968,7 +968,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
/* need to zero data of old helper */
memset(&help->help, 0, sizeof(help->help));
} else {
help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help == NULL)
return -ENOMEM;
}
@ -1136,29 +1136,10 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
ct->status |= IPS_CONFIRMED;
if (cda[CTA_STATUS]) {
err = ctnetlink_change_status(ct, cda);
if (err < 0)
goto err;
}
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
goto err;
}
nf_ct_acct_ext_add(ct, GFP_KERNEL);
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif
rcu_read_lock();
helper = __nf_ct_helper_find(rtuple);
if (helper) {
help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help == NULL) {
rcu_read_unlock();
err = -ENOMEM;
@ -1168,6 +1149,29 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
rcu_assign_pointer(help->helper, helper);
}
if (cda[CTA_STATUS]) {
err = ctnetlink_change_status(ct, cda);
if (err < 0) {
rcu_read_unlock();
goto err;
}
}
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0) {
rcu_read_unlock();
goto err;
}
}
nf_ct_acct_ext_add(ct, GFP_KERNEL);
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif
/* setup master conntrack: this is a confirmed expectation */
if (master_ct) {
__set_bit(IPS_EXPECTED_BIT, &ct->status);

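Two things happen in this reorder: the helper extension is now allocated with
GFP_ATOMIC because the lookup and assignment run inside rcu_read_lock(),
where sleeping allocations are forbidden; and the status/protoinfo updates
move after the helper attachment, picking up rcu_read_unlock() on their
error paths.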
View File

@ -150,6 +150,8 @@ static void update_rfkill_state(struct rfkill *rfkill)
* calls and handling all the red tape such as issuing notifications
* if the call is successful.
*
Suspended devices are not touched at all, and -EBUSY is returned.
*
* Note that the @force parameter cannot override a (possibly cached)
* state of RFKILL_STATE_HARD_BLOCKED. Any device making use of
* RFKILL_STATE_HARD_BLOCKED implements either get_state() or
@ -168,6 +170,9 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
int retval = 0;
enum rfkill_state oldstate, newstate;
if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
return -EBUSY;
oldstate = rfkill->state;
if (rfkill->get_state && !force &&
@ -214,7 +219,7 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
*
* This function toggles the state of all switches of given type,
* unless a specific switch is claimed by userspace (in which case,
* that switch is left alone).
* that switch is left alone) or suspended.
*/
void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
{
@ -239,8 +244,8 @@ EXPORT_SYMBOL(rfkill_switch_all);
/**
* rfkill_epo - emergency power off all transmitters
*
* This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring
* everything in its path but rfkill_mutex and rfkill->mutex.
* This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
* ignoring everything in its path but rfkill_mutex and rfkill->mutex.
*/
void rfkill_epo(void)
{
@ -458,13 +463,14 @@ static int rfkill_resume(struct device *dev)
if (dev->power.power_state.event != PM_EVENT_ON) {
mutex_lock(&rfkill->mutex);
dev->power.power_state.event = PM_EVENT_ON;
/* restore radio state AND notify everybody */
rfkill_toggle_radio(rfkill, rfkill->state, 1);
mutex_unlock(&rfkill->mutex);
}
dev->power.power_state = PMSG_ON;
return 0;
}
#else

View File

@ -27,6 +27,7 @@
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@ -426,7 +427,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
wd->qdisc->flags &= ~TCQ_F_THROTTLED;
smp_wmb();
__netif_schedule(wd->qdisc);
__netif_schedule(qdisc_root(wd->qdisc));
return HRTIMER_NORESTART;
}
@ -637,11 +638,8 @@ static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid
if (new || old)
qdisc_notify(skb, n, clid, old, new);
if (old) {
spin_lock_bh(&old->q.lock);
if (old)
qdisc_destroy(old);
spin_unlock_bh(&old->q.lock);
}
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
@ -707,6 +705,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
return err;
}
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
Allocate and initialize new qdisc.
@ -767,6 +769,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
if (handle == TC_H_INGRESS) {
sch->flags |= TCQ_F_INGRESS;
handle = TC_H_MAKE(TC_H_INGRESS, 0);
lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
} else {
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
@ -774,6 +777,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
if (handle == 0)
goto err_out3;
}
lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
}
sch->handle = handle;
@ -1084,20 +1088,13 @@ create_n_graft:
}
graft:
if (1) {
spinlock_t *root_lock;
err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
if (err) {
if (q) {
root_lock = qdisc_root_lock(q);
spin_lock_bh(root_lock);
qdisc_destroy(q);
spin_unlock_bh(root_lock);
}
return err;
}
err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
if (err) {
if (q)
qdisc_destroy(q);
return err;
}
return 0;
}

View File

@ -654,7 +654,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
}
sch->flags &= ~TCQ_F_THROTTLED;
__netif_schedule(sch);
__netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART;
}

View File

@ -518,14 +518,17 @@ void qdisc_reset(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_reset);
/* this is the rcu callback function to clean up a qdisc when there
* are no further references to it */
static void __qdisc_destroy(struct rcu_head *head)
void qdisc_destroy(struct Qdisc *qdisc)
{
struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
const struct Qdisc_ops *ops = qdisc->ops;
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
return;
if (qdisc->parent)
list_del(&qdisc->list);
#ifdef CONFIG_NET_SCHED
qdisc_put_stab(qdisc->stab);
#endif
@ -542,20 +545,6 @@ static void __qdisc_destroy(struct rcu_head *head)
kfree((char *) qdisc - qdisc->padded);
}
/* Under qdisc_lock(qdisc) and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
return;
if (qdisc->parent)
list_del(&qdisc->list);
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
EXPORT_SYMBOL(qdisc_destroy);
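qdisc_destroy() thereby loses both its RCU deferral and its locking
requirement: with __QDISC_STATE_DEACTIVATED stopping new work and
dev_deactivate() spinning until no qdisc is running or scheduled, the
structure can be freed synchronously. That is also why q_rcu disappears from
struct Qdisc in the sch_generic.h hunk above.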
static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
@ -597,6 +586,9 @@ static void transition_one_qdisc(struct net_device *dev,
struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
int *need_watchdog_p = _need_watchdog;
if (!(new_qdisc->flags & TCQ_F_BUILTIN))
clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
*need_watchdog_p = 1;
@ -640,6 +632,9 @@ static void dev_deactivate_queue(struct net_device *dev,
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
if (!(qdisc->flags & TCQ_F_BUILTIN))
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
dev_queue->qdisc = qdisc_default;
qdisc_reset(qdisc);
@ -647,7 +642,7 @@ static void dev_deactivate_queue(struct net_device *dev,
}
}
static bool some_qdisc_is_busy(struct net_device *dev, int lock)
static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
@ -661,14 +656,12 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
q = dev_queue->qdisc_sleeping;
root_lock = qdisc_lock(q);
if (lock)
spin_lock_bh(root_lock);
spin_lock_bh(root_lock);
val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
test_bit(__QDISC_STATE_SCHED, &q->state));
if (lock)
spin_unlock_bh(root_lock);
spin_unlock_bh(root_lock);
if (val)
return true;
@ -678,8 +671,6 @@ static bool some_qdisc_is_busy(struct net_device *dev, int lock)
void dev_deactivate(struct net_device *dev)
{
bool running;
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
@ -689,25 +680,8 @@ void dev_deactivate(struct net_device *dev)
synchronize_rcu();
/* Wait for outstanding qdisc_run calls. */
do {
while (some_qdisc_is_busy(dev, 0))
yield();
/*
* Double-check inside queue lock to ensure that all effects
* of the queue run are visible when we return.
*/
running = some_qdisc_is_busy(dev, 1);
/*
* The running flag should never be set at this point because
* we've already set dev->qdisc to noop_qdisc *inside* the same
* pair of spin locks. That is, if any qdisc_run starts after
* our initial test it should see the noop_qdisc and then
* clear the RUNNING bit before dropping the queue lock. So
* if it is set here then we've found a bug.
*/
} while (WARN_ON_ONCE(running));
while (some_qdisc_is_busy(dev))
yield();
}
static void dev_init_scheduler_queue(struct net_device *dev,
@ -736,14 +710,10 @@ static void shutdown_scheduler_queue(struct net_device *dev,
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
spinlock_t *root_lock = qdisc_lock(qdisc);
dev_queue->qdisc = qdisc_default;
dev_queue->qdisc_sleeping = qdisc_default;
spin_lock_bh(root_lock);
qdisc_destroy(qdisc);
spin_unlock_bh(root_lock);
}
}

View File

@ -577,7 +577,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->qstats.drops++;
cl->qstats.drops++;
}
return NET_XMIT_DROP;
return ret;
} else {
cl->bstats.packets +=
skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
@ -623,7 +623,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
sch->qstats.drops++;
cl->qstats.drops++;
}
return NET_XMIT_DROP;
return ret;
} else
htb_activate(q, cl);
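Returning ret instead of a hard NET_XMIT_DROP lets child-qdisc verdicts such
as NET_XMIT_CN propagate to the caller unchanged; the drop counters are
already guarded by net_xmit_drop_count() (visible in the prio hunk below), so
only genuine drops are counted.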

View File

@ -113,11 +113,11 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
sch->q.qlen++;
sch->qstats.requeues++;
return 0;
return NET_XMIT_SUCCESS;
}
if (net_xmit_drop_count(ret))
sch->qstats.drops++;
return NET_XMIT_DROP;
return ret;
}

View File

@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
if (qdisc_pkt_len(skb) > q->max_size) {
sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_ACT
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
kfree_skb(skb);
return NET_XMIT_DROP;
}
if (qdisc_pkt_len(skb) > q->max_size)
return qdisc_reshape_fail(skb, sch);
ret = qdisc_enqueue(skb, q->qdisc);
if (ret != 0) {