
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "This is what we usually expect at this stage of the game, lots of
  little things, mostly in drivers.  With the occasional 'oops didn't
  mean to do that' kind of regressions in the core code."

 1) Uninitialized data in __ip_vs_get_timeouts(), from Arnd Bergmann

 2) Reject invalid ACK sequences in Fast Open sockets, from Jerry Chu.

 3) Lost error code on return from _rtl_usb_receive(), from Christian
    Lamparter.

 4) Fix reset resume on USB rt2x00, from Stanislaw Gruszka.

 5) Release resources on error in pch_gbe driver, from Veaceslav Falico.

 6) Default hop limit not set correctly in ip6_template_metrics[], fix
    from Li RongQing.

 7) Gianfar PTP code requests wrong kind of resource during probe, fix
    from Wei Yang.

 8) Fix VHOST net driver on big-endian, from Michael S Tsirkin.

 9) Mellanox driver bug fixes from Jack Morgenstein, Or Gerlitz, Moni
    Shoua, Dotan Barak, and Uri Habusha.

10) usbnet leaks memory on TX path, fix from Hemant Kumar.

11) Use socket state test, rather than presence of FIN bit packet, to
    determine FIONREAD/SIOCINQ value.  Fix from Eric Dumazet.

12) Fix cxgb4 build failure, from Vipul Pandya.

13) Provide a SYN_DATA_ACKED state to complement SYN_FASTOPEN in socket
    info dumps.  From Yuchung Cheng.

14) Fix leak of security path in kfree_skb_partial().  Fix from Eric
    Dumazet.

15) Handle RX FIFO overflows more resiliently in pch_gbe driver, from
    Veaceslav Falico.

16) Fix MAINTAINERS file pattern for networking drivers, from Jean
    Delvare.

17) Add iPhone5 IDs to IPHETH driver, from Jay Purohit.

18) VLAN device type change restriction is too strict, and should not
    trigger for the automatically generated vlan0 device.  Fix from Jiri
    Pirko.

19) Make PMTU/redirect flushing work properly again in ipv4, from
    Steffen Klassert.

20) Fix memory corruptions by using kfree_rcu() in netlink_release().
    From Eric Dumazet.

21) More qmi_wwan device IDs, from Bjørn Mork.

22) Fix unintentional change of SNAT/DNAT hooks in generic NAT
    infrastructure, from Elison Niven.

23) Fix 3.6.x regression in xt_TEE netfilter module, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (57 commits)
  tilegx: fix some issues in the SW TSO support
  qmi_wwan/cdc_ether: move Novatel 551 and E362 to qmi_wwan
  net: usb: Fix memory leak on Tx data path
  net/mlx4_core: Unmap UAR also in the case of error flow
  net/mlx4_en: Don't use vlan tag value as an indication for vlan presence
  net/mlx4_en: Fix double-release-range in tx-rings
  bas_gigaset: fix pre_reset handling
  vhost: fix mergeable bufs on BE hosts
  gianfar_ptp: use iomem, not ioports resource tree in probe
  ipv6: Set default hoplimit as zero.
  NET_VENDOR_TI: make available for am33xx as well
  pch_gbe: fix error handling in pch_gbe_up()
  b43: Fix oops on unload when firmware not found
  mwifiex: clean up scan state on error
  mwifiex: return -EBUSY if specific scan request cannot be honored
  brcmfmac: fix potential NULL dereference
  Revert "ath9k_hw: Updated AR9003 tx gain table for 5GHz"
  ath9k_htc: Add PID/VID for a Ubiquiti WiFiStation
  rt2x00: usb: fix reset resume
  rtlwifi: pass rx setup error code to caller
  ...
Committed by Linus Torvalds, 2012-10-26 15:00:48 -07:00
commit e657e078d3
63 changed files with 724 additions and 384 deletions


@ -5062,7 +5062,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
S: Odd Fixes
F: drivers/net/
F: include/linux/if_*
F: include/linux/*device.h
F: include/linux/netdevice.h
F: include/linux/arcdevice.h
F: include/linux/etherdevice.h
F: include/linux/fcdevice.h
F: include/linux/fddidevice.h
F: include/linux/hippidevice.h
F: include/linux/inetdevice.h
NETXEN (1/10) GbE SUPPORT
M: Sony Chacko <sony.chacko@qlogic.com>


@ -158,9 +158,10 @@ static int bcma_register_cores(struct bcma_bus *bus)
static void bcma_unregister_cores(struct bcma_bus *bus)
{
struct bcma_device *core;
struct bcma_device *core, *tmp;
list_for_each_entry(core, &bus->cores, list) {
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
list_del(&core->list);
if (core->dev_registered)
device_unregister(&core->dev);
}

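Aside (not part of the patch): the change above is the standard fix for unlinking entries while walking a kernel linked list. A minimal sketch with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int id;
	struct list_head node;
};

/* list_for_each_entry() dereferences pos->node.next *after* each loop
 * body, so list_del() plus kfree(pos) inside the body is a use-after-free.
 * The _safe variant caches the next element in 'n' before the body runs.
 */
static void prune_negative(struct list_head *items)
{
	struct item *pos, *n;

	list_for_each_entry_safe(pos, n, items, node) {
		if (pos->id < 0) {
			list_del(&pos->node);
			kfree(pos);
		}
	}
}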

@ -617,7 +617,13 @@ static void int_in_work(struct work_struct *work)
if (rc == 0)
/* success, resubmit interrupt read URB */
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (rc != 0 && rc != -ENODEV) {
switch (rc) {
case 0: /* success */
case -ENODEV: /* device gone */
case -EINVAL: /* URB already resubmitted, or terminal badness */
break;
default: /* failure: try to recover by resetting the device */
dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
if (rc == 0) {
@ -2442,7 +2448,9 @@ static void gigaset_disconnect(struct usb_interface *interface)
}
/* gigaset_suspend
* This function is called before the USB connection is suspended.
* This function is called before the USB connection is suspended
* or before the USB device is reset.
* In the latter case, message == PMSG_ON.
*/
static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
{
@ -2498,7 +2506,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
del_timer_sync(&ucs->timer_atrdy);
del_timer_sync(&ucs->timer_cmd_in);
del_timer_sync(&ucs->timer_int_in);
cancel_work_sync(&ucs->int_in_wq);
/* don't try to cancel int_in_wq from within reset as it
* might be the one requesting the reset
*/
if (message.event != PM_EVENT_ON)
cancel_work_sync(&ucs->int_in_wq);
gig_dbg(DEBUG_SUSPEND, "suspend complete");
return 0;

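Aside (hypothetical names, not the gigaset code): the message.event check above avoids a classic self-deadlock, where the work item that requested the reset would be waited on from inside its own call chain:

#include <linux/usb.h>
#include <linux/workqueue.h>

struct my_usb_ctx {
	struct usb_device *udev;
	struct usb_interface *intf;
	struct work_struct recover_work;
};

static void recover_work_fn(struct work_struct *work)
{
	struct my_usb_ctx *c = container_of(work, struct my_usb_ctx, recover_work);

	/* Resetting the device makes the USB core call our own
	 * pre_reset/suspend handler (with PMSG_ON).  If that handler ran
	 * cancel_work_sync(&c->recover_work), it would wait for *this*
	 * function to return while this function waits for the reset to
	 * finish -- a deadlock.
	 */
	if (usb_lock_device_for_reset(c->udev, c->intf) == 0) {
		usb_reset_device(c->udev);
		usb_unlock_device(c->udev);
	}
}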

@ -144,9 +144,22 @@
#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
/* FLEXCAN hardware feature flags */
/*
* FLEXCAN hardware feature flags
*
* Below is some version info we got:
* SOC Version IP-Version Glitch- [TR]WRN_INT
* Filter? connected?
* MX25 FlexCAN2 03.00.00.00 no no
* MX28 FlexCAN2 03.00.04.00 yes yes
* MX35 FlexCAN2 03.00.00.00 no no
* MX53 FlexCAN2 03.00.00.00 yes no
* MX6s FlexCAN3 10.00.12.00 yes yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
#define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* Broken error state handling */
#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */
/* Structure of the message buffer */
struct flexcan_mb {
@ -205,7 +218,7 @@ static struct flexcan_devtype_data fsl_p1010_devtype_data = {
};
static struct flexcan_devtype_data fsl_imx28_devtype_data;
static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE,
.features = FLEXCAN_HAS_V10_FEATURES,
};
static const struct can_bittiming_const flexcan_bittiming_const = {

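Aside: per-SoC feature bits like these are consumed with a plain mask test. A one-line sketch (the accessor is hypothetical; only the flag and struct names come from the driver):

static bool wrn_int_usable(const struct flexcan_devtype_data *devtype)
{
	/* [TR]WRN_INT is only trustworthy when the line is actually wired up */
	return !(devtype->features & FLEXCAN_HAS_BROKEN_ERR_STATE);
}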

@ -30,9 +30,10 @@
#include "sja1000.h"
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards");
MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards");
MODULE_LICENSE("GPL v2");
#define DRV_NAME "peak_pci"
@ -64,7 +65,11 @@ struct peak_pci_chan {
#define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */
#define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */
#define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */
#define PEAK_MPCI_DEVICE_ID 0x0008 /* The miniPCI slot cards */
#define PEAK_CPCI_DEVICE_ID 0x0004 /* for nextgen cPCI slot cards */
#define PEAK_MPCI_DEVICE_ID 0x0005 /* for nextgen miniPCI slot cards */
#define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */
#define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */
#define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */
#define PEAK_PCI_CHAN_MAX 4
@ -76,6 +81,10 @@ static DEFINE_PCI_DEVICE_TABLE(peak_pci_tbl) = {
{PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
#ifdef CONFIG_CAN_PEAK_PCIEC
{PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
#endif


@ -251,6 +251,8 @@ struct adapter_params {
unsigned char rev; /* chip revision */
unsigned char offload;
unsigned char bypass;
unsigned int ofldq_wr_cred;
};
@ -642,6 +644,23 @@ extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
static inline int is_bypass(struct adapter *adap)
{
return adap->params.bypass;
}
static inline int is_bypass_device(int device)
{
/* this should be set based upon device capabilities */
switch (device) {
case 0x440b:
case 0x440c:
return 1;
default:
return 0;
}
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;


@ -3513,18 +3513,6 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
#ifndef CONFIG_CHELSIO_T4_OFFLOAD
/*
* If we're a pure NIC driver then disable all offloading facilities.
* This will allow the firmware to optimize aspects of the hardware
* configuration which will result in improved performance.
*/
caps_cmd.ofldcaps = 0;
caps_cmd.iscsicaps = 0;
caps_cmd.rdmacaps = 0;
caps_cmd.fcoecaps = 0;
#endif
if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
if (!vf_acls)
caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
@ -3745,6 +3733,7 @@ static int adap_init0(struct adapter *adap)
u32 v, port_vec;
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
int reset = 1, j;
/*
@ -3898,6 +3887,9 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
if (is_bypass_device(adap->pdev->device))
adap->params.bypass = 1;
/*
* Grab some of our basic fundamental operating parameters.
*/
@ -3940,13 +3932,12 @@ static int adap_init0(struct adapter *adap)
adap->tids.aftid_end = val[1];
}
#ifdef CONFIG_CHELSIO_T4_OFFLOAD
/*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
memset(&caps_cmd, 0, sizeof(caps_cmd));
caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST | FW_CMD_READ);
caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
@ -3991,15 +3982,6 @@ static int adap_init0(struct adapter *adap)
adap->vres.ddp.size = val[4] - val[3] + 1;
adap->params.ofldq_wr_cred = val[5];
params[0] = FW_PARAM_PFVF(ETHOFLD_START);
params[1] = FW_PARAM_PFVF(ETHOFLD_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
params, val);
if ((val[0] != val[1]) && (ret >= 0)) {
adap->tids.uotid_base = val[0];
adap->tids.nuotids = val[1] - val[0] + 1;
}
adap->params.offload = 1;
}
if (caps_cmd.rdmacaps) {
@ -4048,7 +4030,6 @@ static int adap_init0(struct adapter *adap)
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
/*
* These are finalized by FW initialization, load their values now.


@ -102,6 +102,9 @@ struct tid_info {
unsigned int ftid_base;
unsigned int aftid_base;
unsigned int aftid_end;
/* Server filter region */
unsigned int sftid_base;
unsigned int nsftids;
spinlock_t atid_lock ____cacheline_aligned_in_smp;
union aopen_entry *afree;


@ -478,7 +478,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
pr_err("no resource\n");
goto no_resource;
}
if (request_resource(&ioport_resource, etsects->rsrc)) {
if (request_resource(&iomem_resource, etsects->rsrc)) {
pr_err("resource busy\n");
goto no_resource;
}

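Aside (a sketch of the underlying rule, not the probe function itself): platform MMIO ranges live in the iomem_resource tree, while ioport_resource only covers legacy port I/O, so the original call claimed the range in the wrong hierarchy.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *map_platform_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return NULL;
	/* claim the physical range under the MMIO root ... */
	if (request_resource(&iomem_resource, res))
		return NULL;
	/* ... then map it into kernel virtual address space */
	return ioremap(res->start, resource_size(res));
}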

@ -143,7 +143,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
mlx4_bf_free(mdev->dev, &ring->bf);
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
@ -712,7 +711,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (bounce)
tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descirptor hits memory


@ -837,6 +837,18 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i]) {
iounmap(priv->eq_table.uar_map[i]);
priv->eq_table.uar_map[i] = NULL;
}
}
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
u8 intr, struct mlx4_eq *eq)
{
@ -1201,6 +1213,7 @@ err_out_unmap:
mlx4_free_irqs(dev);
err_out_bitmap:
mlx4_unmap_uar(dev);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
err_out_free:
@ -1225,10 +1238,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
iounmap(priv->eq_table.uar_map[i]);
mlx4_unmap_uar(dev);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
kfree(priv->eq_table.uar_map);


@ -338,26 +338,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
pr_err("Error: busy bit is not cleared\n");
}
/**
* pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
* @reg: Pointer of register
* @busy: Busy bit
*/
static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
{
u32 tmp;
int ret = -1;
/* wait busy */
tmp = 20;
while ((ioread32(reg) & bit) && --tmp)
udelay(5);
if (!tmp)
pr_err("Error: busy bit is not cleared\n");
else
ret = 0;
return ret;
}
/**
* pch_gbe_mac_mar_set - Set MAC address register
* @hw: Pointer to the HW structure
@ -409,15 +389,20 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
return;
}
static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
{
/* Read the MAC addresses. and store to the private data */
pch_gbe_mac_read_mac_addr(hw);
iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
/* Setup the MAC addresses */
pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
return;
u32 rctl;
/* Disables Receive MAC */
rctl = ioread32(&hw->reg->MAC_RX_EN);
iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}
static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
{
u32 rctl;
/* Enables Receive MAC */
rctl = ioread32(&hw->reg->MAC_RX_EN);
iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
}
/**
@ -913,7 +898,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
{
struct pch_gbe_hw *hw = &adapter->hw;
u32 rdba, rdlen, rctl, rxdma;
u32 rdba, rdlen, rxdma;
pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
(unsigned long long)adapter->rx_ring->dma,
@ -921,9 +906,7 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
pch_gbe_mac_force_mac_fc(hw);
/* Disables Receive MAC */
rctl = ioread32(&hw->reg->MAC_RX_EN);
iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
pch_gbe_disable_mac_rx(hw);
/* Disables Receive DMA */
rxdma = ioread32(&hw->reg->DMA_CTRL);
@ -1316,38 +1299,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
{
struct pch_gbe_hw *hw = &adapter->hw;
u32 rxdma;
u16 value;
int ret;
/* Disable Receive DMA */
rxdma = ioread32(&hw->reg->DMA_CTRL);
rxdma &= ~PCH_GBE_RX_DMA_EN;
iowrite32(rxdma, &hw->reg->DMA_CTRL);
/* Wait Rx DMA BUS is IDLE */
ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
if (ret) {
/* Disable Bus master */
pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
value &= ~PCI_COMMAND_MASTER;
pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
/* Stop Receive */
pch_gbe_mac_reset_rx(hw);
/* Enable Bus master */
value |= PCI_COMMAND_MASTER;
pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
} else {
/* Stop Receive */
pch_gbe_mac_reset_rx(hw);
}
/* reprogram multicast address register after reset */
pch_gbe_set_multi(adapter->netdev);
}
static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
{
u32 rxdma;
@ -1355,9 +1317,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
rxdma = ioread32(&hw->reg->DMA_CTRL);
rxdma |= PCH_GBE_RX_DMA_EN;
iowrite32(rxdma, &hw->reg->DMA_CTRL);
/* Enables Receive */
iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
return;
}
/**
@ -1393,7 +1352,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
int_en = ioread32(&hw->reg->INT_EN);
iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
&hw->reg->INT_EN);
pch_gbe_stop_receive(adapter);
pch_gbe_disable_dma_rx(&adapter->hw);
int_st |= ioread32(&hw->reg->INT_ST);
int_st = int_st & ioread32(&hw->reg->INT_EN);
}
@ -1971,12 +1930,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
int err;
int err = -EINVAL;
/* Ensure we have a valid MAC */
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
pr_err("Error: Invalid MAC address\n");
return -EINVAL;
goto out;
}
/* hardware has been reset, we need to reload some things */
@ -1989,18 +1948,19 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
err = pch_gbe_request_irq(adapter);
if (err) {
pr_err("Error: can't bring device up\n");
return err;
pr_err("Error: can't bring device up - irq request failed\n");
goto out;
}
err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
if (err) {
pr_err("Error: can't bring device up\n");
return err;
pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
goto freeirq;
}
pch_gbe_alloc_tx_buffers(adapter, tx_ring);
pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
adapter->tx_queue_len = netdev->tx_queue_len;
pch_gbe_start_receive(&adapter->hw);
pch_gbe_enable_dma_rx(&adapter->hw);
pch_gbe_enable_mac_rx(&adapter->hw);
mod_timer(&adapter->watchdog_timer, jiffies);
@ -2009,6 +1969,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
netif_start_queue(adapter->netdev);
return 0;
freeirq:
pch_gbe_free_irq(adapter);
out:
return err;
}
/**
@ -2405,7 +2370,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
int work_done = 0;
bool poll_end_flag = false;
bool cleaned = false;
u32 int_en;
pr_debug("budget : %d\n", budget);
@ -2422,19 +2386,13 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
if (poll_end_flag) {
napi_complete(napi);
if (adapter->rx_stop_flag) {
adapter->rx_stop_flag = false;
pch_gbe_start_receive(&adapter->hw);
}
pch_gbe_irq_enable(adapter);
} else
if (adapter->rx_stop_flag) {
adapter->rx_stop_flag = false;
pch_gbe_start_receive(&adapter->hw);
int_en = ioread32(&adapter->hw.reg->INT_EN);
iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
&adapter->hw.reg->INT_EN);
}
}
if (adapter->rx_stop_flag) {
adapter->rx_stop_flag = false;
pch_gbe_enable_dma_rx(&adapter->hw);
}
pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
poll_end_flag, work_done, budget);

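Aside: the reworked pch_gbe_up() above is the usual goto-based unwind ladder for bring-up paths. A generic sketch with hypothetical helpers and types:

static int bring_up(struct my_adapter *adapter)
{
	int err;

	err = setup_irq_step(adapter);
	if (err)
		goto out;		/* nothing acquired yet */

	err = alloc_buffers_step(adapter);
	if (err)
		goto free_irq;		/* undo only what already succeeded */

	start_hw_step(adapter);
	return 0;

free_irq:
	teardown_irq_step(adapter);
out:
	return err;
}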

@ -2525,6 +2525,13 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
qdev->req_q_size =
(u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
/* The barrier is required to ensure request and response queue
* addr writes to the registers.
*/
wmb();
qdev->req_q_virt_addr =
pci_alloc_consistent(qdev->pdev,
(size_t) qdev->req_q_size,
@ -2536,8 +2543,6 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
return -ENOMEM;
}
qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
qdev->rsp_q_virt_addr =
pci_alloc_consistent(qdev->pdev,
(size_t) qdev->rsp_q_size,


@ -5,7 +5,7 @@
config NET_VENDOR_TI
bool "Texas Instruments (TI) devices"
default y
depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3))
depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from


@ -1334,11 +1334,11 @@ static int tso_count_edescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
long f_size = skb->hdr_len; /* size of the current fragment */
long f_used = sh_len; /* bytes used from the current fragment */
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
long f_used = 0; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int num_edescs = 0;
int segment;
@ -1353,7 +1353,7 @@ static int tso_count_edescs(struct sk_buff *skb)
/* Advance as needed. */
while (f_used >= f_size) {
f_id++;
f_size = sh->frags[f_id].size;
f_size = skb_frag_size(&sh->frags[f_id]);
f_used = 0;
}
@ -1384,13 +1384,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
struct iphdr *ih;
struct tcphdr *th;
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned int data_len = skb->len - sh_len;
unsigned char *data = skb->data;
unsigned int ih_off, th_off, p_len;
unsigned int isum_seed, tsum_seed, id, seq;
long f_id = -1; /* id of the current fragment */
long f_size = skb->hdr_len; /* size of the current fragment */
long f_used = sh_len; /* bytes used from the current fragment */
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
long f_used = 0; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int segment;
@ -1405,7 +1405,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
isum_seed = ((0xFFFF - ih->check) +
(0xFFFF - ih->tot_len) +
(0xFFFF - ih->id));
tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len));
tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
id = ntohs(ih->id);
seq = ntohl(th->seq);
@ -1444,7 +1444,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
/* Advance as needed. */
while (f_used >= f_size) {
f_id++;
f_size = sh->frags[f_id].size;
f_size = skb_frag_size(&sh->frags[f_id]);
f_used = 0;
}
@ -1478,14 +1478,14 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
struct tile_net_priv *priv = netdev_priv(dev);
struct skb_shared_info *sh = skb_shinfo(skb);
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
gxio_mpipe_edesc_t edesc_head = { { 0 } };
gxio_mpipe_edesc_t edesc_body = { { 0 } };
long f_id = -1; /* id of the current fragment */
long f_size = skb->hdr_len; /* size of the current fragment */
long f_used = sh_len; /* bytes used from the current fragment */
void *f_data = skb->data;
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
long f_used = 0; /* bytes used from the current fragment */
void *f_data = skb->data + sh_len;
long n; /* size of the current piece of payload */
unsigned long tx_packets = 0, tx_bytes = 0;
unsigned int csum_start;
@ -1516,15 +1516,18 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
/* Egress the payload. */
while (p_used < p_len) {
void *va;
/* Advance as needed. */
while (f_used >= f_size) {
f_id++;
f_size = sh->frags[f_id].size;
f_used = 0;
f_size = skb_frag_size(&sh->frags[f_id]);
f_data = tile_net_frag_buf(&sh->frags[f_id]);
f_used = 0;
}
va = f_data + f_used;
/* Use bytes from the current fragment. */
n = p_len - p_used;
if (n > f_size - f_used)
@ -1533,7 +1536,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
p_used += n;
/* Egress a piece of the payload. */
edesc_body.va = va_to_tile_io_addr(f_data) + f_used;
edesc_body.va = va_to_tile_io_addr(va);
edesc_body.xfer_size = n;
edesc_body.bound = !(p_used < p_len);
gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);

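Aside: the corrected length accounting above boils down to one identity worth spelling out (sketch only):

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* skb->len      = linear bytes + all paged fragment bytes
 * skb_headlen() = linear bytes only
 * so the TSO payload is everything after the transport header,
 * whether it sits in the linear area or in the frags.
 */
static unsigned int tso_payload_len(const struct sk_buff *skb)
{
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	return skb->len - sh_len;
}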

@ -15,6 +15,11 @@ if PHYLIB
comment "MII PHY device drivers"
config AT803X_PHY
tristate "Drivers for Atheros AT803X PHYs"
---help---
Currently supports the AT8030 and AT8035 model
config AMD_PHY
tristate "Drivers for the AMD PHYs"
---help---


@ -25,6 +25,7 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o

drivers/net/phy/at803x.c (new file)

@ -0,0 +1,176 @@
/*
* drivers/net/phy/at803x.c
*
* Driver for Atheros 803x PHY
*
* Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/phy.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#define AT803X_INTR_ENABLE 0x12
#define AT803X_INTR_STATUS 0x13
#define AT803X_WOL_ENABLE 0x01
#define AT803X_DEVICE_ADDR 0x03
#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
MODULE_DESCRIPTION("Atheros 803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
static void at803x_set_wol_mac_addr(struct phy_device *phydev)
{
struct net_device *ndev = phydev->attached_dev;
const u8 *mac;
unsigned int i, offsets[] = {
AT803X_LOC_MAC_ADDR_32_47_OFFSET,
AT803X_LOC_MAC_ADDR_16_31_OFFSET,
AT803X_LOC_MAC_ADDR_0_15_OFFSET,
};
if (!ndev)
return;
mac = (const u8 *) ndev->dev_addr;
if (!is_valid_ether_addr(mac))
return;
for (i = 0; i < 3; i++) {
phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
AT803X_DEVICE_ADDR);
phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
offsets[i]);
phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
AT803X_FUNC_DATA);
phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA,
mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
}
}
static int at803x_config_init(struct phy_device *phydev)
{
int val;
u32 features;
int status;
features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
SUPPORTED_FIBRE | SUPPORTED_BNC;
val = phy_read(phydev, MII_BMSR);
if (val < 0)
return val;
if (val & BMSR_ANEGCAPABLE)
features |= SUPPORTED_Autoneg;
if (val & BMSR_100FULL)
features |= SUPPORTED_100baseT_Full;
if (val & BMSR_100HALF)
features |= SUPPORTED_100baseT_Half;
if (val & BMSR_10FULL)
features |= SUPPORTED_10baseT_Full;
if (val & BMSR_10HALF)
features |= SUPPORTED_10baseT_Half;
if (val & BMSR_ESTATEN) {
val = phy_read(phydev, MII_ESTATUS);
if (val < 0)
return val;
if (val & ESTATUS_1000_TFULL)
features |= SUPPORTED_1000baseT_Full;
if (val & ESTATUS_1000_THALF)
features |= SUPPORTED_1000baseT_Half;
}
phydev->supported = features;
phydev->advertising = features;
/* enable WOL */
at803x_set_wol_mac_addr(phydev);
status = phy_write(phydev, AT803X_INTR_ENABLE, AT803X_WOL_ENABLE);
status = phy_read(phydev, AT803X_INTR_STATUS);
return 0;
}
/* ATHEROS 8035 */
static struct phy_driver at8035_driver = {
.phy_id = 0x004dd072,
.name = "Atheros 8035 ethernet",
.phy_id_mask = 0xffffffef,
.config_init = at803x_config_init,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
};
/* ATHEROS 8030 */
static struct phy_driver at8030_driver = {
.phy_id = 0x004dd076,
.name = "Atheros 8030 ethernet",
.phy_id_mask = 0xffffffef,
.config_init = at803x_config_init,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.driver = {
.owner = THIS_MODULE,
},
};
static int __init atheros_init(void)
{
int ret;
ret = phy_driver_register(&at8035_driver);
if (ret)
goto err1;
ret = phy_driver_register(&at8030_driver);
if (ret)
goto err2;
return 0;
err2:
phy_driver_unregister(&at8035_driver);
err1:
return ret;
}
static void __exit atheros_exit(void)
{
phy_driver_unregister(&at8035_driver);
phy_driver_unregister(&at8030_driver);
}
module_init(atheros_init);
module_exit(atheros_exit);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ 0x004dd076, 0xffffffef },
{ 0x004dd072, 0xffffffef },
{ }
};
MODULE_DEVICE_TABLE(mdio, atheros_tbl);

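Aside: at803x_set_wol_mac_addr() above is an open-coded clause-45 style indirect (MMD) register write; the 0x4003 behind AT803X_FUNC_DATA is just function code 0x4000 ("data, no post-increment") OR'd with device address 3. Generalized as a hypothetical helper using the same register defines:

static void at803x_mmd_write(struct phy_device *phydev, u16 devad,
			     u16 reg, u16 val)
{
	/* 1. select the MMD device, function = address */
	phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, devad);
	/* 2. latch the target register offset */
	phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, reg);
	/* 3. same device, function = data, no post-increment */
	phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, 0x4000 | devad);
	/* 4. the data register now aliases the target register */
	phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, val);
}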

@ -592,6 +592,32 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
/* Novatel USB551L and MC551 - handled by qmi_wwan */
{
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = NOVATEL_VENDOR_ID,
.idProduct = 0xB001,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = 0,
},
/* Novatel E362 - handled by qmi_wwan */
{
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = NOVATEL_VENDOR_ID,
.idProduct = 0x9010,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = 0,
},
/*
* WHITELIST!!!
*
@ -604,21 +630,6 @@ static const struct usb_device_id products [] = {
* because of bugs/quirks in a given product (like Zaurus, above).
*/
{
/* Novatel USB551L */
/* This match must come *before* the generic CDC-ETHER match so that
* we get FLAG_WWAN set on the device, since it's descriptors are
* generic CDC-ETHER.
*/
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = NOVATEL_VENDOR_ID,
.idProduct = 0xB001,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
.bInterfaceProtocol = USB_CDC_PROTO_NONE,
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE (Vodafone) K3805-Z */
.match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT


@ -62,6 +62,7 @@
#define USB_PRODUCT_IPAD 0x129a
#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define USB_PRODUCT_IPHONE_4S 0x12a0
#define USB_PRODUCT_IPHONE_5 0x12a8
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
{ USB_DEVICE_AND_INTERFACE_INFO(
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);


@ -369,18 +369,73 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Novatel USB551L and MC551 */
USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0xb001,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Novatel E362 */
USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9010,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0021, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0025, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0031, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0042, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0049, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0052, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */
{QMI_FIXED_INTF(0x19d2, 0x0058, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */
{QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */
{QMI_FIXED_INTF(0x19d2, 0x0113, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0118, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0121, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0123, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0124, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0125, 6)},
{QMI_FIXED_INTF(0x19d2, 0x0126, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0130, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0133, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0141, 5)},
{QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */
{QMI_FIXED_INTF(0x19d2, 0x0158, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */
{QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
{QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
{QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */
{QMI_FIXED_INTF(0x19d2, 0x1021, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1245, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1247, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1252, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1254, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */


@ -1158,6 +1158,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
usb_anchor_urb(urb, &dev->deferred);
/* no use to process more packets */
netif_stop_queue(net);
usb_put_urb(urb);
spin_unlock_irqrestore(&dev->txq.lock, flags);
netdev_dbg(dev->net, "Delaying transmission for resumption\n");
goto deferred;
@ -1310,6 +1311,8 @@ void usbnet_disconnect (struct usb_interface *intf)
cancel_work_sync(&dev->kevent);
usb_scuttle_anchored_urbs(&dev->deferred);
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);

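Aside on the one-line fix above: usb_alloc_urb() hands the submitter one reference and usb_anchor_urb() takes another for the anchor, so a URB that is parked on the anchor instead of being submitted must have the submitter's reference dropped, or it (and its buffer) leaks. A sketch with hypothetical context:

static void defer_tx_urb(struct usb_anchor *deferred, struct urb *urb)
{
	usb_anchor_urb(urb, deferred);	/* the anchor now holds its own ref */
	usb_put_urb(urb);		/* drop the allocation/submitter ref */
}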

@ -534,107 +534,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
{0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
{0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
{0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
{0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
{0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
{0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
{0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
{0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
{0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
{0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
{0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
{0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
{0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
{0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
{0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
{0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
{0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
{0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
{0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
{0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83},
{0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84},
{0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
{0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
{0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
{0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
{0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
{0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
{0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
{0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
{0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
{0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
{0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
{0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
{0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
{0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
{0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
{0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
{0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
{0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
{0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
{0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
{0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
{0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
{0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
{0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
{0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
{0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
{0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
{0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
{0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
{0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
{0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
{0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
{0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
{0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
{0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
{0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
{0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
{0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
{0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
{0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
{0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
{0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
{0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
{0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
{0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
{0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
{0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
{0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
{0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
{0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
{0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
{0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
{0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
{0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
{0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
{0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
{0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
{0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
{0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
{0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
{0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
{0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
{0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
{0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
{0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
{0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
{0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
{0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
{0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
{0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
{0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
{0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
{0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
{0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
{0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
{0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
{0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
{0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
{0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
{0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
{0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
{0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
{0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
{0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
{0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
{0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
{0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
{0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
{0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
{0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
{0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
{0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
{0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
{0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
{0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};


@ -38,6 +38,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
{ USB_DEVICE(0x040D, 0x3801) }, /* VIA */
{ USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0cf3, 0x7015),


@ -5404,6 +5404,8 @@ static void b43_bcma_remove(struct bcma_device *core)
cancel_work_sync(&wldev->restart_work);
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
return; /* NULL if firmware never loaded */
if (wl->current_dev == wldev && wl->hw_registred) {
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
@ -5478,6 +5480,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
cancel_work_sync(&wldev->restart_work);
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
return; /* NULL if firmware never loaded */
if (wl->current_dev == wldev && wl->hw_registred) {
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);


@ -1339,7 +1339,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
}
ret = brcmf_bus_start(dev);
if (ret == -ENOLINK) {
if (ret) {
brcmf_dbg(ERROR, "dongle is not responding\n");
brcmf_detach(dev);
goto fail;


@ -3569,7 +3569,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
if (!request || !request->n_ssids || !request->n_match_sets) {
WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
request->n_ssids);
request ? request->n_ssids : 0);
return -EINVAL;
}
@ -3972,7 +3972,7 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
u8 *iovar_ie_buf;
u8 *curr_ie_buf;
u8 *mgmt_ie_buf = NULL;
u32 mgmt_ie_buf_len = 0;
int mgmt_ie_buf_len;
u32 *mgmt_ie_len = 0;
u32 del_add_ie_buf_len = 0;
u32 total_ie_buf_len = 0;
@ -3982,7 +3982,7 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
struct parsed_vndr_ie_info *vndrie_info;
s32 i;
u8 *ptr;
u32 remained_buf_len;
int remained_buf_len;
WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
@ -4606,12 +4606,13 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
struct wiphy *wiphy = cfg_to_wiphy(cfg);
struct brcmf_channel_info_le channel_le;
struct ieee80211_channel *notify_channel;
struct ieee80211_channel *notify_channel = NULL;
struct ieee80211_supported_band *band;
struct brcmf_bss_info_le *bi;
u32 freq;
s32 err = 0;
u32 target_channel;
u8 *buf;
WL_TRACE("Enter\n");
@ -4619,11 +4620,22 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
memcpy(profile->bssid, e->addr, ETH_ALEN);
brcmf_update_bss_info(cfg);
brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
sizeof(channel_le));
buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (buf == NULL) {
err = -ENOMEM;
goto done;
}
target_channel = le32_to_cpu(channel_le.target_channel);
WL_CONN("Roamed to channel %d\n", target_channel);
/* data sent to dongle has to be little endian */
*(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
if (err)
goto done;
bi = (struct brcmf_bss_info_le *)(buf + 4);
target_channel = bi->ctl_ch ? bi->ctl_ch :
CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
if (target_channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
@ -4633,6 +4645,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
freq = ieee80211_channel_to_frequency(target_channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
done:
kfree(buf);
cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
@ -5186,41 +5200,6 @@ brcmf_cfg80211_event(struct net_device *ndev,
schedule_work(&cfg->event_work);
}
static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
{
s32 infra = 0;
s32 err = 0;
switch (iftype) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_WDS:
WL_ERR("type (%d) : currently we do not support this mode\n",
iftype);
err = -EINVAL;
return err;
case NL80211_IFTYPE_ADHOC:
infra = 0;
break;
case NL80211_IFTYPE_STATION:
infra = 1;
break;
case NL80211_IFTYPE_AP:
infra = 1;
break;
default:
err = -EINVAL;
WL_ERR("invalid type (%d)\n", iftype);
return err;
}
err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
if (err) {
WL_ERR("WLC_SET_INFRA error (%d)\n", err);
return err;
}
return 0;
}
static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
{
/* Room for "event_msgs" + '\0' + bitvec */
@ -5439,7 +5418,8 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
WL_BEACON_TIMEOUT);
if (err)
goto default_conf_out;
err = brcmf_dongle_mode(ndev, wdev->iftype);
err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
NULL, NULL);
if (err && err != -EINPROGRESS)
goto default_conf_out;
err = brcmf_dongle_probecap(cfg);


@ -10472,7 +10472,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
} else
len = src->len;
dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
if (!dst)
continue;


@ -518,7 +518,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
* See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl6000_channel_switch_cmd cmd;
struct iwl6000_channel_switch_cmd *cmd;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
@ -527,18 +527,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_vif *vif = ctx->vif;
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = { sizeof(cmd), },
.len = { sizeof(*cmd), },
.flags = CMD_SYNC,
.data = { &cmd, },
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int err;
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
hcmd.data[0] = cmd;
cmd->band = priv->band == IEEE80211_BAND_2GHZ;
ch = ch_switch->channel->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
ctx->active.channel, ch);
cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = ctx->staging.flags;
cmd.rxon_filter_flags = ctx->staging.filter_flags;
cmd->channel = cpu_to_le16(ch);
cmd->rxon_flags = ctx->staging.flags;
cmd->rxon_filter_flags = ctx->staging.filter_flags;
switch_count = ch_switch->count;
tsf_low = ch_switch->timestamp & 0x0ffffffff;
/*
@ -554,23 +561,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
switch_count = 0;
}
if (switch_count <= 1)
cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
else {
switch_time_in_usec =
vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
ucode_switch_time = iwl_usecs_to_beacons(priv,
switch_time_in_usec,
beacon_interval);
cmd.switch_time = iwl_add_beacon_time(priv,
priv->ucode_beacon_time,
ucode_switch_time,
beacon_interval);
cmd->switch_time = iwl_add_beacon_time(priv,
priv->ucode_beacon_time,
ucode_switch_time,
beacon_interval);
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
cmd->switch_time);
cmd->expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
return iwl_dvm_send_cmd(priv, &hcmd);
err = iwl_dvm_send_cmd(priv, &hcmd);
kfree(cmd);
return err;
}
struct iwl_lib_ops iwl6000_lib = {


@ -1825,8 +1825,6 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
priv->scan_request = request;
priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
GFP_KERNEL);
if (!priv->user_scan_cfg) {
@ -1834,6 +1832,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return -ENOMEM;
}
priv->scan_request = request;
priv->user_scan_cfg->num_ssids = request->n_ssids;
priv->user_scan_cfg->ssid_list = request->ssids;
@ -1870,6 +1870,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
ret = mwifiex_scan_networks(priv, priv->user_scan_cfg);
if (ret) {
dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
priv->scan_request = NULL;
kfree(priv->user_scan_cfg);
priv->user_scan_cfg = NULL;
return ret;
}


@ -1843,21 +1843,18 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
struct cfg80211_ssid *req_ssid)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret = 0;
int ret;
struct mwifiex_user_scan_cfg *scan_cfg;
if (!req_ssid)
return -1;
if (adapter->scan_processing) {
dev_dbg(adapter->dev, "cmd: Scan already in process...\n");
return ret;
dev_err(adapter->dev, "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
dev_dbg(adapter->dev,
dev_err(adapter->dev,
"cmd: Scan is blocked during association...\n");
return ret;
return -EBUSY;
}
scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);


@ -1988,6 +1988,7 @@ static struct usb_driver rt2500usb_driver = {
.disconnect = rt2x00usb_disconnect,
.suspend = rt2x00usb_suspend,
.resume = rt2x00usb_resume,
.reset_resume = rt2x00usb_resume,
.disable_hub_initiated_lpm = 1,
};


@ -1282,6 +1282,7 @@ static struct usb_driver rt2800usb_driver = {
.disconnect = rt2x00usb_disconnect,
.suspend = rt2x00usb_suspend,
.resume = rt2x00usb_resume,
.reset_resume = rt2x00usb_resume,
.disable_hub_initiated_lpm = 1,
};


@ -2535,6 +2535,7 @@ static struct usb_driver rt73usb_driver = {
.disconnect = rt2x00usb_disconnect,
.suspend = rt2x00usb_suspend,
.resume = rt2x00usb_resume,
.reset_resume = rt2x00usb_resume,
.disable_hub_initiated_lpm = 1,
};

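The three rt2x00 hunks above wire .reset_resume to the existing resume handler, so a device the USB core had to reset while resuming gets fully re-initialized instead of coming back dead. A hedged skeleton of that wiring for a hypothetical driver (all demo_* names and the VID/PID are placeholders, not rt2x00 symbols):

#include <linux/module.h>
#include <linux/usb.h>

static int demo_usb_probe(struct usb_interface *intf,
                          const struct usb_device_id *id)
{
        return 0;       /* device bring-up would go here */
}

static void demo_usb_disconnect(struct usb_interface *intf)
{
}

static int demo_usb_suspend(struct usb_interface *intf, pm_message_t state)
{
        return 0;
}

static int demo_usb_resume(struct usb_interface *intf)
{
        return 0;       /* full re-init, so it is safe after a bus reset too */
}

static const struct usb_device_id demo_usb_ids[] = {
        { USB_DEVICE(0x1234, 0x5678) }, /* placeholder VID/PID */
        { }
};
MODULE_DEVICE_TABLE(usb, demo_usb_ids);

static struct usb_driver demo_usb_driver = {
        .name           = "demo_usb",
        .id_table       = demo_usb_ids,
        .probe          = demo_usb_probe,
        .disconnect     = demo_usb_disconnect,
        .suspend        = demo_usb_suspend,
        .resume         = demo_usb_resume,
        /* Reuse the resume path when the core resets the device on resume. */
        .reset_resume   = demo_usb_resume,
        .disable_hub_initiated_lpm = 1,
};
module_usb_driver(demo_usb_driver);

MODULE_LICENSE("GPL");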

@ -673,7 +673,7 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
set_hal_start(rtlhal);
/* Start bulk IN */
_rtl_usb_receive(hw);
err = _rtl_usb_receive(hw);
}
return err;


@ -379,7 +379,8 @@ static void handle_rx(struct vhost_net *net)
.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
};
size_t total_len = 0;
int err, headcount, mergeable;
int err, mergeable;
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
/* TODO: check that we are running from vhost_worker? */


@ -191,7 +191,8 @@ struct tcp_sock {
u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
early_retrans_delayed:1, /* Delayed ER timer installed */
syn_data:1, /* SYN includes data */
syn_fastopen:1; /* SYN includes Fast Open option */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
/* RTT measurement */
u32 srtt; /* smoothed round trip time << 3 */


@ -1218,6 +1218,7 @@ struct cfg80211_deauth_request {
const u8 *ie;
size_t ie_len;
u16 reason_code;
bool local_state_change;
};
/**


@ -130,6 +130,7 @@ enum {
#define TCPI_OPT_WSCALE 4
#define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */
#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
#define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
enum tcp_ca_state {
TCP_CA_Open = 0,

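The new TCPI_OPT_SYN_DATA bit is exported through TCP_INFO, so userspace can tell whether data carried in a Fast Open SYN was acknowledged. A small userspace sketch, assuming a connected TCP socket fd and defining the constant locally in case the installed headers predate it:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCPI_OPT_SYN_DATA
#define TCPI_OPT_SYN_DATA 32    /* SYN-ACK acked data in SYN sent or rcvd */
#endif

/* Returns 1 if Fast Open SYN data was acked, 0 if not, -1 on error. */
static int syn_data_was_acked(int fd)
{
        struct tcp_info info;
        socklen_t len = sizeof(info);

        memset(&info, 0, sizeof(info));
        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0)
                return -1;

        return (info.tcpi_options & TCPI_OPT_SYN_DATA) ? 1 : 0;
}

A zero result simply means the connection either sent no data in the SYN or the peer did not acknowledge it.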

@ -463,7 +463,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
if (vlan_uses_dev(dev))
return NOTIFY_BAD;
break;
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:


@ -1167,6 +1167,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
uint16_t crc;
unsigned long entrytime;
spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
/* setting claim destination address */
@ -1210,8 +1212,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
/**
* batadv_bla_check_bcast_duplist
* @bat_priv: the bat priv with all the soft interface information
* @bcast_packet: originator mac address
* @hdr_size: maximum length of the frame
* @bcast_packet: encapsulated broadcast frame plus batman header
* @bcast_packet_len: length of encapsulated broadcast frame plus batman header
*
* check if it is on our broadcast list. Another gateway might
* have sent the same packet because it is connected to the same backbone,
@ -1224,20 +1226,22 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
*/
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
struct batadv_bcast_packet *bcast_packet,
int hdr_size)
int bcast_packet_len)
{
int i, length, curr;
int i, length, curr, ret = 0;
uint8_t *content;
uint16_t crc;
struct batadv_bcast_duplist_entry *entry;
length = hdr_size - sizeof(*bcast_packet);
length = bcast_packet_len - sizeof(*bcast_packet);
content = (uint8_t *)bcast_packet;
content += sizeof(*bcast_packet);
/* calculate the crc ... */
crc = crc16(0, content, length);
spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
curr = (bat_priv->bla.bcast_duplist_curr + i);
curr %= BATADV_DUPLIST_SIZE;
@ -1259,9 +1263,12 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
/* this entry seems to match: same crc, not too old,
* and from another gw. therefore return 1 to forbid it.
*/
return 1;
ret = 1;
goto out;
}
/* not found, add a new entry (overwrite the oldest entry) */
/* not found, add a new entry (overwrite the oldest entry)
* and allow it, it's the first occurrence.
*/
curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
curr %= BATADV_DUPLIST_SIZE;
entry = &bat_priv->bla.bcast_duplist[curr];
@ -1270,8 +1277,10 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
bat_priv->bla.bcast_duplist_curr = curr;
/* allow it, it's the first occurrence. */
return 0;
out:
spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
return ret;
}

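The batman-adv hunk above puts the broadcast-duplicate list under a spinlock and routes every exit through a single unlock label. Reduced to its locking shape, with demo_* placeholders instead of the real CRC/hash helpers:

#include <linux/spinlock.h>

struct demo_duplist {
        spinlock_t lock;        /* protects the duplicate entries */
        /* ... entries and current index ... */
};

/* Placeholder helpers; the real code compares a crc16 over the payload. */
int demo_lookup(struct demo_duplist *dl, const void *frame, int len);
void demo_insert(struct demo_duplist *dl, const void *frame, int len);

/* Returns 1 if the frame was seen before, 0 if it is new. */
static int demo_check_duplicate(struct demo_duplist *dl,
                                const void *frame, int len)
{
        int ret = 0;

        spin_lock_bh(&dl->lock);

        if (demo_lookup(dl, frame, len)) {
                ret = 1;
                goto out;
        }

        demo_insert(dl, frame, len);
out:
        spin_unlock_bh(&dl->lock);
        return ret;
}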

@ -1124,8 +1124,14 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
spin_unlock_bh(&orig_node->bcast_seqno_lock);
/* keep skb linear for crc calculation */
if (skb_linearize(skb) < 0)
goto out;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
/* check whether this has been sent by another originator before */
if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len))
goto out;
/* rebroadcast packet */


@ -205,6 +205,8 @@ struct batadv_priv_bla {
struct batadv_hashtable *backbone_hash;
struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
int bcast_duplist_curr;
/* protects bcast_duplist and bcast_duplist_curr */
spinlock_t bcast_duplist_lock;
struct batadv_bla_claim_dst claim_dest;
struct delayed_work work;
};


@ -32,6 +32,8 @@
#define SMP_TIMEOUT msecs_to_jiffies(30000)
#define AUTH_REQ_MASK 0x07
static inline void swap128(u8 src[16], u8 dst[16])
{
int i;
@ -230,7 +232,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
req->init_key_dist = 0;
req->resp_key_dist = dist_keys;
req->auth_req = authreq;
req->auth_req = (authreq & AUTH_REQ_MASK);
return;
}
@ -239,7 +241,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
rsp->init_key_dist = 0;
rsp->resp_key_dist = req->resp_key_dist & dist_keys;
rsp->auth_req = authreq;
rsp->auth_req = (authreq & AUTH_REQ_MASK);
}
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)


@ -3379,10 +3379,12 @@ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen)
if (head_stolen) {
skb_release_head_state(skb);
kmem_cache_free(skbuff_head_cache, skb);
else
} else {
__kfree_skb(skb);
}
}
EXPORT_SYMBOL(kfree_skb_partial);


@ -1163,8 +1163,12 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
spin_lock_bh(&fnhe_lock);
if (daddr == fnhe->fnhe_daddr) {
struct rtable *orig;
struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);
if (orig && rt_is_expired(orig)) {
fnhe->fnhe_gw = 0;
fnhe->fnhe_pmtu = 0;
fnhe->fnhe_expires = 0;
}
if (fnhe->fnhe_pmtu) {
unsigned long expires = fnhe->fnhe_expires;
unsigned long diff = expires - jiffies;
@ -1181,7 +1185,6 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
} else if (!rt->rt_gateway)
rt->rt_gateway = daddr;
orig = rcu_dereference(fnhe->fnhe_rth);
rcu_assign_pointer(fnhe->fnhe_rth, rt);
if (orig)
rt_free(orig);


@ -549,14 +549,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
!tp->urg_data ||
before(tp->urg_seq, tp->copied_seq) ||
!before(tp->urg_seq, tp->rcv_nxt)) {
struct sk_buff *skb;
answ = tp->rcv_nxt - tp->copied_seq;
/* Subtract 1, if FIN is in queue. */
skb = skb_peek_tail(&sk->sk_receive_queue);
if (answ && skb)
answ -= tcp_hdr(skb)->fin;
/* Subtract 1, if FIN was received */
if (answ && sock_flag(sk, SOCK_DONE))
answ--;
} else
answ = tp->urg_seq - tp->copied_seq;
release_sock(sk);
@ -2766,6 +2764,8 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
info->tcpi_options |= TCPI_OPT_ECN;
if (tp->ecn_flags & TCP_ECN_SEEN)
info->tcpi_options |= TCPI_OPT_ECN_SEEN;
if (tp->syn_data_acked)
info->tcpi_options |= TCPI_OPT_SYN_DATA;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);

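The tcp_ioctl() hunk above decides the "subtract one byte for the FIN" adjustment from the socket state (SOCK_DONE) instead of peeking at the tail skb of the receive queue. From userspace the value is still obtained the same way; a minimal sketch, assuming a TCP socket fd:

#include <sys/ioctl.h>

/* Bytes of in-order payload available to read, excluding any FIN. */
static int tcp_readable_bytes(int fd)
{
        int bytes = 0;

        if (ioctl(fd, FIONREAD, &bytes) < 0)    /* same value as SIOCINQ */
                return -1;

        return bytes;
}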

@ -5646,6 +5646,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
tcp_rearm_rto(sk);
return true;
}
tp->syn_data_acked = tp->syn_data;
return false;
}
@ -5963,7 +5964,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
req = tp->fastopen_rsk;
if (req != NULL) {
BUG_ON(sk->sk_state != TCP_SYN_RECV &&
WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
@ -6052,7 +6053,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* ACK we have received, this would have acknowledged
* our SYNACK so stop the SYNACK timer.
*/
if (acceptable && req != NULL) {
if (req != NULL) {
/* Return RST if ack_seq is invalid.
* Note that RFC793 only says to generate a
* DUPACK for it but for TCP Fast Open it seems
* better to treat this case like TCP_SYN_RECV
* above.
*/
if (!acceptable)
return 1;
/* We no longer need the request sock. */
reqsk_fastopen_remove(sk, req, false);
tcp_rearm_rto(sk);

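In the tcp_input.c hunk, syn_data_acked is set on the client once the SYN-ACK covers the data sent in the Fast Open SYN. On the sending side that data goes out via sendto() with MSG_FASTOPEN; a hedged client sketch, assuming a Fast-Open-capable kernel and defining the flag locally if the headers lack it:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000 /* send data in the SYN */
#endif

/* Returns a connected socket on success, -1 on error. */
static int fastopen_send(const char *ip, unsigned short port,
                         const void *buf, size_t len)
{
        struct sockaddr_in addr;
        int fd;

        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0)
                return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(port);
        inet_pton(AF_INET, ip, &addr.sin_addr);

        /* connect() + send() in one call: data rides in the SYN if possible */
        if (sendto(fd, buf, len, MSG_FASTOPEN,
                   (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}

Whether the server actually acknowledged the SYN data can then be checked with the TCP_INFO sketch shown earlier.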

@ -1461,6 +1461,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
skb_set_owner_r(skb, child);
__skb_queue_tail(&child->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
tp->syn_data_acked = 1;
}
sk->sk_data_ready(sk, 0);
bh_unlock_sock(child);


@ -510,6 +510,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}


@ -347,8 +347,8 @@ void tcp_retransmit_timer(struct sock *sk)
return;
}
if (tp->fastopen_rsk) {
BUG_ON(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk->sk_state != TCP_FIN_WAIT1);
tcp_fastopen_synack_timer(sk);
/* Before we receive ACK to our SYN-ACK don't retransmit
* anything else (e.g., data or FIN segments).


@ -219,7 +219,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
[RTAX_HOPLIMIT - 1] = 255,
[RTAX_HOPLIMIT - 1] = 0,
};
static const struct rt6_info ip6_null_entry_template = {
@ -1232,7 +1232,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;


@ -853,7 +853,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->control.vif == &sdata->vif) {
__skb_unlink(skb, &local->pending[i]);
dev_kfree_skb_irq(skb);
ieee80211_free_txskb(&local->hw, skb);
}
}
}


@ -3099,22 +3099,32 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ht_cfreq, ht_oper->primary_chan,
cbss->channel->band);
ht_oper = NULL;
} else {
channel_type = NL80211_CHAN_HT20;
}
}
if (ht_oper) {
channel_type = NL80211_CHAN_HT20;
if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
/*
* cfg80211 already verified that the channel itself can
* be used, but it didn't check that we can do the right
* HT type, so do that here as well. If HT40 isn't allowed
* on this channel, disable 40 MHz operation.
*/
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
switch (ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
else
channel_type = NL80211_CHAN_HT40PLUS;
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
else
channel_type = NL80211_CHAN_HT40MINUS;
break;
}
break;
}
}
@ -3549,6 +3559,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
bool tx = !req->local_state_change;
mutex_lock(&ifmgd->mtx);
@ -3565,12 +3576,12 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated &&
ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
req->reason_code, true, frame_buf);
req->reason_code, tx, frame_buf);
} else {
drv_mgd_prepare_tx(sdata->local, sdata);
ieee80211_send_deauth_disassoc(sdata, req->bssid,
IEEE80211_STYPE_DEAUTH,
req->reason_code, true,
req->reason_code, tx,
frame_buf);
}


@ -650,7 +650,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
*/
if (!skb)
break;
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
}
/*
@ -679,7 +679,7 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
local->total_ps_buffered--;
ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
sta->sta.addr);
dev_kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
}
/*


@ -406,7 +406,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
int queue = info->hw_queue;
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
return;
}
@ -431,7 +431,7 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
ieee80211_free_txskb(&local->hw, skb);
continue;
}


@ -106,7 +106,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
if (status->flag & RX_FLAG_MMIC_ERROR)
goto mic_fail;
if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
goto update_iv;
return RX_CONTINUE;
@ -545,14 +546,19 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
static void bip_aad(struct sk_buff *skb, u8 *aad)
{
__le16 mask_fc;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
/* BIP AAD: FC(masked) || A1 || A2 || A3 */
/* FC type/subtype */
aad[0] = skb->data[0];
/* Mask FC Retry, PwrMgt, MoreData flags to zero */
aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6));
mask_fc = hdr->frame_control;
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM |
IEEE80211_FCTL_MOREDATA);
put_unaligned(mask_fc, (__le16 *) &aad[0]);
/* A1 || A2 || A3 */
memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN);
memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN);
}


@ -2589,6 +2589,8 @@ __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
struct ip_vs_proto_data *pd;
#endif
memset(u, 0, sizeof (*u));
#ifdef CONFIG_IP_VS_PROTO_TCP
pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
@ -2766,7 +2768,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_timeout_user t;
memset(&t, 0, sizeof(t));
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;

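The __ip_vs_get_timeouts() hunk zeroes the output structure before the #ifdef'd branches fill individual fields, so a configuration that compiles some protocols out cannot leak uninitialized kernel stack to userspace through the later copy_to_user(). The same shape, with placeholder names:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_timeouts {
        int tcp_timeout;
        int udp_timeout;
};

/* Placeholder helpers standing in for the per-protocol lookups. */
int demo_tcp_timeout(void);
int demo_udp_timeout(void);

static void demo_get_timeouts(struct demo_timeouts *u)
{
        /* Zero everything first: not every field is set below. */
        memset(u, 0, sizeof(*u));

#ifdef CONFIG_DEMO_TCP
        u->tcp_timeout = demo_tcp_timeout();
#endif
#ifdef CONFIG_DEMO_UDP
        u->udp_timeout = demo_udp_timeout();
#endif
}

static int demo_copy_timeouts(void __user *user)
{
        struct demo_timeouts t;

        demo_get_timeouts(&t);
        if (copy_to_user(user, &t, sizeof(t)))
                return -EFAULT;
        return 0;
}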

@ -180,9 +180,9 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
struct ctnl_timeout *timeout;
struct nf_conn_timeout *timeout_ext;
const struct ipt_entry *e = par->entryinfo;
struct nf_conntrack_l4proto *l4proto;
int ret = 0;
u8 proto;
rcu_read_lock();
timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
@ -192,9 +192,11 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
goto out;
}
if (e->ip.invflags & IPT_INV_PROTO) {
proto = xt_ct_find_proto(par);
if (!proto) {
ret = -EINVAL;
pr_info("You cannot use inversion on L4 protocol\n");
pr_info("You must specify a L4 protocol, and not use "
"inversions on it.\n");
goto out;
}
@ -214,7 +216,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
/* Make sure the timeout policy matches any existing protocol tracker,
* otherwise default to generic.
*/
l4proto = __nf_ct_l4proto_find(par->family, e->ip.proto);
l4proto = __nf_ct_l4proto_find(par->family, proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
pr_info("Timeout policy `%s' can only be used by L4 protocol "


@ -70,6 +70,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
fl4.daddr = info->gw.ip;
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return false;


@ -111,7 +111,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
@ -123,7 +123,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
(1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
{
@ -133,7 +133,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
{
@ -143,7 +143,7 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
.targetsize = sizeof(struct nf_nat_range),
.table = "nat",
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN),
(1 << NF_INET_LOCAL_OUT),
.me = THIS_MODULE,
},
};


@ -138,6 +138,8 @@ static int netlink_dump(struct sock *sk);
static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
static ATOMIC_NOTIFIER_HEAD(netlink_chain);
static inline u32 netlink_group_mask(u32 group)
@ -345,6 +347,11 @@ netlink_update_listeners(struct sock *sk)
struct hlist_node *node;
unsigned long mask;
unsigned int i;
struct listeners *listeners;
listeners = nl_deref_protected(tbl->listeners);
if (!listeners)
return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
@ -352,7 +359,7 @@ netlink_update_listeners(struct sock *sk)
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
tbl->listeners->masks[i] = mask;
listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
@ -536,7 +543,11 @@ static int netlink_release(struct socket *sock)
if (netlink_is_kernel(sk)) {
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
kfree(nl_table[sk->sk_protocol].listeners);
struct listeners *old;
old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
kfree_rcu(old, rcu);
nl_table[sk->sk_protocol].module = NULL;
nl_table[sk->sk_protocol].bind = NULL;
nl_table[sk->sk_protocol].flags = 0;
@ -982,7 +993,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
rcu_read_lock();
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
if (group - 1 < nl_table[sk->sk_protocol].groups)
if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
@ -1625,7 +1636,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
old = rcu_dereference_protected(tbl->listeners, 1);
old = nl_deref_protected(tbl->listeners);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);

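The netlink hunk above replaces a bare kfree() of the listeners array with the publish-then-defer pattern: clear the RCU pointer first, then free the old object with kfree_rcu() so lockless readers under rcu_read_lock() never see freed memory. A reduced sketch with a hypothetical structure:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_listeners {
        struct rcu_head rcu;    /* needed by kfree_rcu() */
        unsigned long masks[1];
};

struct demo_table {
        struct demo_listeners __rcu *listeners;
};

static void demo_drop_listeners(struct demo_table *tbl)
{
        struct demo_listeners *old;

        /* Assumes the caller holds the lock that writers serialize on. */
        old = rcu_dereference_protected(tbl->listeners, 1);
        RCU_INIT_POINTER(tbl->listeners, NULL);
        if (old)
                kfree_rcu(old, rcu);    /* freed only after a grace period */
}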

@ -457,20 +457,14 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
.reason_code = reason,
.ie = ie,
.ie_len = ie_len,
.local_state_change = local_state_change,
};
ASSERT_WDEV_LOCK(wdev);
if (local_state_change) {
if (wdev->current_bss &&
ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
}
if (local_state_change && (!wdev->current_bss ||
!ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
return 0;
}
return rdev->ops->deauth(&rdev->wiphy, dev, &req);
}