
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (74 commits)
  Revert "b43: Enforce DMA descriptor memory constraints"
  iwmc3200wifi: fix array out-of-boundary access
  wl1251: timeout one too soon in wl1251_boot_run_firmware()
  mac80211: fix propagation of failed hardware reconfigurations
  mac80211: fix race with suspend and dynamic_ps_disable_work
  ath9k: fix missed error codes in the tx status check
  ath9k: wake hardware during AMPDU TX actions
  ath9k: wake hardware for interface IBSS/AP/Mesh removal
  ath9k: fix suspend by waking device prior to stop
  cfg80211: fix error path in cfg80211_wext_siwscan
  wl1271_cmd.c: cleanup char => u8
  iwlwifi: Storage class should be before const qualifier
  ath9k: Storage class should be before const qualifier
  cfg80211: fix race between deauth and assoc response
  wireless: remove remaining qual code
  rt2x00: Add USB ID for Linksys WUSB 600N rev 2.
  ath5k: fix SWI calibration interrupt storm
  mac80211: fix ibss join with fixed-bssid
  libertas: Remove carrier signaling from the scan code
  orinoco: fix GFP_KERNEL in orinoco_set_key with interrupts disabled
  ...
Linus Torvalds committed 2009-12-30 12:37:35 -08:00
commit c3bf4906fb
90 changed files with 863 additions and 813 deletions


@ -56,6 +56,7 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev)
memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
/* Fill in the station address. */
memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr,
sizeof(dev->dev_addr));
memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
/* The Tx-block list is written as needed. We just set up the values. */
lp->tx_cmd_link = IDLELOOP + 4;
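
The change above (and the matching ibmlana and pcnet32 hunks further down) replaces sizeof(dev->dev_addr) with ETH_ALEN when copying the station address. At this point in the tree net_device's dev_addr is a pointer rather than an array, so sizeof() yields the pointer size instead of the 6-byte MAC length. Below is an illustrative, standalone sketch of the pitfall, not part of the commit; fake_netdev is a hypothetical stand-in for struct net_device.

/* Standalone sketch: why ETH_ALEN must be used once dev_addr is a pointer. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6	/* same value as in <linux/if_ether.h> */

struct fake_netdev {
	unsigned char *dev_addr;	/* a pointer, as in struct net_device here */
};

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct fake_netdev dev = { .dev_addr = mac };
	unsigned char copy[ETH_ALEN];

	/* Old pattern: sizeof(dev.dev_addr) is the pointer size (4 or 8), not 6. */
	printf("sizeof(dev.dev_addr) = %zu, ETH_ALEN = %d\n",
	       sizeof(dev.dev_addr), ETH_ALEN);

	/* Fixed pattern: copy exactly the MAC length. */
	memcpy(copy, dev.dev_addr, ETH_ALEN);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       copy[0], copy[1], copy[2], copy[3], copy[4], copy[5]);
	return 0;
}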


@ -2346,6 +2346,7 @@ config GELIC_NET
config GELIC_WIRELESS
bool "PS3 Wireless support"
depends on WLAN
depends on GELIC_NET
select WIRELESS_EXT
help
@ -2358,6 +2359,7 @@ config GELIC_WIRELESS
config GELIC_WIRELESS_OLD_PSK_INTERFACE
bool "PS3 Wireless private PSK interface (OBSOLETE)"
depends on GELIC_WIRELESS
select WEXT_PRIV
help
This option retains the obsolete private interface to pass
the PSK from user space programs to the driver. The PSK


@ -275,6 +275,7 @@ struct be_adapter {
u32 tx_fc; /* Tx flow control */
int link_speed;
u8 port_type;
u8 transceiver;
};
extern const struct ethtool_ops be_ethtool_ops;


@ -1479,6 +1479,41 @@ err:
return status;
}
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_lmode *req;
int status;
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
goto err;
}
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
sizeof(*req));
req->src_port = port_num;
req->dest_port = port_num;
req->loopback_type = loopback_type;
req->loopback_state = enable;
status = be_mcc_notify_wait(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
@ -1501,6 +1536,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
req->hdr.timeout = 4;
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);


@ -155,6 +155,7 @@ struct be_mcc_mailbox {
#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
struct be_cmd_req_hdr {
u8 opcode; /* dword 0 */
@ -821,6 +822,19 @@ struct be_cmd_resp_loopback_test {
u32 ticks_compl;
};
struct be_cmd_req_set_lmode {
struct be_cmd_req_hdr hdr;
u8 src_port;
u8 dest_port;
u8 loopback_type;
u8 loopback_state;
};
struct be_cmd_resp_set_lmode {
struct be_cmd_resp_hdr resp_hdr;
u8 rsvd0[4];
};
/********************** DDR DMA test *********************/
struct be_cmd_req_ddrdma_test {
struct be_cmd_req_hdr hdr;
@ -912,3 +926,5 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);


@ -118,6 +118,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
#define BE_NO_LOOPBACK 0xff
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@ -339,28 +340,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = be_cmd_read_port_type(adapter, adapter->port_num,
&connector);
switch (connector) {
case 7:
ecmd->port = PORT_FIBRE;
break;
default:
ecmd->port = PORT_TP;
break;
if (!status) {
switch (connector) {
case 7:
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
break;
case 0:
ecmd->port = PORT_TP;
ecmd->transceiver = XCVR_EXTERNAL;
break;
default:
ecmd->port = PORT_TP;
ecmd->transceiver = XCVR_INTERNAL;
break;
}
} else {
ecmd->port = PORT_AUI;
ecmd->transceiver = XCVR_INTERNAL;
}
/* Save for future use */
adapter->link_speed = ecmd->speed;
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
ecmd->transceiver = adapter->transceiver;
}
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
ecmd->phy_address = adapter->port_num;
ecmd->transceiver = XCVR_INTERNAL;
switch (ecmd->port) {
case PORT_FIBRE:
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
break;
case PORT_TP:
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
break;
case PORT_AUI:
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
break;
}
return 0;
}
@ -489,6 +512,19 @@ err:
return ret;
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
be_cmd_set_loopback(adapter, adapter->port_num,
loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->port_num,
loopback_type, 1500,
2, 0xabc);
be_cmd_set_loopback(adapter, adapter->port_num,
BE_NO_LOOPBACK, 1);
return *status;
}
static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
@ -497,23 +533,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
data[0] = be_cmd_loopback_test(adapter, adapter->port_num,
BE_MAC_LOOPBACK, 1500,
2, 0xabc);
if (data[0] != 0)
if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
&data[0]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[1] = be_cmd_loopback_test(adapter, adapter->port_num,
BE_PHY_LOOPBACK, 1500,
2, 0xabc);
if (data[1] != 0)
}
if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
&data[1]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[2] = be_cmd_loopback_test(adapter, adapter->port_num,
BE_ONE_PORT_EXT_LOOPBACK,
1500, 2, 0xabc);
if (data[2] != 0)
}
if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
&data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
}
data[3] = be_test_ddr_dma(adapter);
if (data[3] != 0)


@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
CNIC_SB_ID(bp));
}
mutex_unlock(&bp->cnic_mutex);
#endif


@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// check if any partner replys
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
best->slave->dev->master->name);
best->slave ? best->slave->dev->master->name : "NULL");
}
best->is_active = 1;


@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_select_queue = gfar_select_queue,
.ndo_get_stats = gfar_get_stats,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
return priv->vlgrp || priv->rx_csum_enable;
}
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
{
return skb_get_queue_mapping(skb);
}
static void free_tx_pointers(struct gfar_private *priv)
{
int i = 0;
@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
fcb = (struct rxfcb *)skb->data;
/* Remove the FCB from the skb */
skb_set_queue_mapping(skb, fcb->rq);
/* Remove the padded bytes, if there are any */
if (amount_pull)
if (amount_pull) {
skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
}
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull);
} else {


@ -87,6 +87,7 @@ History:
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
/* copy out MAC address */
for (z = 0; z < sizeof(dev->dev_addr); z++)
for (z = 0; z < ETH_ALEN; z++)
dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
/* print config */


@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}


@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
if (ret_val)
goto out;
/* Set number of link attempts before downshift */
ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
if (ret_val)
goto out;
phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
out:
return ret_val;


@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;


@ -1306,13 +1306,8 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
if (mac->type < e1000_82576) {
fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
fc->low_water = fc->high_water - 8;
} else {
fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
}
fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
fc->current_mode = fc->requested_mode;


@ -2763,7 +2763,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
"PF still in reset state, assigning new address\n");
"PF still in reset state, assigning new address."
" Is the PF interface up?\n");
random_ether_addr(hw->mac.addr);
} else {
err = hw->mac.ops.read_mac_addr(hw);


@ -4373,6 +4373,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* pci_restore_state clears dev->state_saved so call
* pci_save_state to restore it.
*/
pci_save_state(pdev);
err = pci_enable_device_mem(pdev);
if (err) {


@ -45,6 +45,7 @@ static const char *const version =
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
if (!is_valid_ether_addr(dev->perm_addr))
memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
printk(" %pM", dev->dev_addr);


@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx)
EFX_LOG(efx, "create port\n");
if (phy_flash_cfg)
efx->phy_mode = PHY_MODE_SPECIAL;
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
goto err;
if (phy_flash_cfg)
efx->phy_mode = PHY_MODE_SPECIAL;
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);


@ -925,6 +925,7 @@ static int falcon_probe_port(struct efx_nic *efx)
static void falcon_remove_port(struct efx_nic *efx)
{
efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}


@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
}
/* Get status of XAUI link */
static bool falcon_xaui_link_ok(struct efx_nic *efx)
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
efx_oword_t reg;
bool align_done, link_ok = false;
int sync_status;
if (LOOPBACK_INTERNAL(efx))
return true;
/* Read link status */
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
/* If the link is up, then check the phy side of the xaui link */
if (efx->link_state.up && link_ok)
if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
link_ok = efx_mdio_phyxgxs_lane_sync(efx);
return link_ok;
}
static bool falcon_xmac_link_ok(struct efx_nic *efx)
{
/*
* Check MAC's XGXS link status except when using XGMII loopback
* which bypasses the XGXS block.
* If possible, check PHY's XGXS link status except when using
* MAC loopback.
*/
return (efx->loopback_mode == LOOPBACK_XGMII ||
falcon_xgxs_link_ok(efx)) &&
(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
LOOPBACK_INTERNAL(efx) ||
efx_mdio_phyxgxs_lane_sync(efx));
}
void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
bool mac_up = falcon_xaui_link_ok(efx);
bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
efx_phy_mode_disabled(efx->phy_mode))
@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
falcon_reset_xaui(efx);
udelay(200);
mac_up = falcon_xaui_link_ok(efx);
mac_up = falcon_xmac_link_ok(efx);
--tries;
}
@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
return !falcon_check_xaui_link_up(efx, 5);
return !falcon_xmac_link_ok_retry(efx, 5);
}
static int falcon_reconfigure_xmac(struct efx_nic *efx)
@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
falcon_reconfigure_mac_wrapper(efx);
efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_mask_status_intr(efx, true);
return 0;
@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
return;
falcon_mask_status_intr(efx, false);
efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_mask_status_intr(efx, true);
}


@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_cfg *phy_cfg;
struct efx_mcdi_phy_cfg *phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
u32 caps;
int rc;
/* TODO: Move phy_data initialisation to
* phy_op->probe/remove, rather than init/fini */
phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
if (phy_cfg == NULL) {
rc = -ENOMEM;
goto fail_alloc;
}
rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
/* Initialise and populate phy_data */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (phy_data == NULL)
return -ENOMEM;
rc = efx_mcdi_get_phy_cfg(efx, phy_data);
if (rc != 0)
goto fail;
efx->phy_type = phy_cfg->type;
/* Read initial link advertisement */
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
goto fail;
efx->mdio_bus = phy_cfg->channel;
efx->mdio.prtad = phy_cfg->port;
efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
/* Fill out nic state */
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
efx->mdio_bus = phy_data->channel;
efx->mdio.prtad = phy_data->port;
efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
efx->mdio.mode_support = 0;
if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->link_advertising =
mcdi_to_ethtool_cap(phy_data->media, caps);
else
phy_data->forced_cap = caps;
/* Assert that we can map efx -> mcdi loopback modes */
BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@ -365,46 +381,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
* but by convention we don't */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
kfree(phy_cfg);
return 0;
fail:
kfree(phy_cfg);
fail_alloc:
return rc;
}
static int efx_mcdi_phy_init(struct efx_nic *efx)
{
struct efx_mcdi_phy_cfg *phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
u32 caps;
int rc;
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (phy_data == NULL)
return -ENOMEM;
rc = efx_mcdi_get_phy_cfg(efx, phy_data);
if (rc != 0)
goto fail;
efx->phy_data = phy_data;
BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc)
goto fail;
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->link_advertising =
mcdi_to_ethtool_cap(phy_data->media, caps);
else
phy_data->forced_cap = caps;
return 0;
fail:
@ -504,7 +480,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
static void efx_mcdi_phy_fini(struct efx_nic *efx)
static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@ -586,10 +562,11 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_mcdi_phy_init,
.init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
.poll = efx_mcdi_phy_poll,
.fini = efx_mcdi_phy_fini,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.run_tests = NULL,


@ -524,6 +524,7 @@ struct efx_phy_operations {
int (*probe) (struct efx_nic *efx);
int (*init) (struct efx_nic *efx);
void (*fini) (struct efx_nic *efx);
void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
void (*get_settings) (struct efx_nic *efx,


@ -1576,6 +1576,8 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
/* Disable hardware watchdog which can misfire */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);


@ -33,6 +33,9 @@
#define PCS_FW_HEARTBEAT_REG 0xd7ee
#define PCS_FW_HEARTB_LBN 0
#define PCS_FW_HEARTB_WIDTH 8
#define PCS_FW_PRODUCT_CODE_1 0xd7f0
#define PCS_FW_VERSION_1 0xd7f3
#define PCS_FW_BUILD_1 0xd7f6
#define PCS_UC8051_STATUS_REG 0xd7fd
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
struct qt202x_phy_data {
enum efx_phy_mode phy_mode;
bool bug17190_in_bad_state;
unsigned long bug17190_timer;
u32 firmware_ver;
};
#define QT2022C2_MAX_RESET_TIME 500
#define QT2022C2_RESET_WAIT 10
static int qt2025c_wait_reset(struct efx_nic *efx)
#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
#define QT2025C_HEARTB_WAIT 100
#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
#define QT2025C_FWSTART_WAIT 100
#define BUG17190_INTERVAL (2 * HZ)
static int qt2025c_wait_heartbeat(struct efx_nic *efx)
{
unsigned long timeout = jiffies + 10 * HZ;
unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
old_counter = counter;
else if (counter != old_counter)
break;
if (time_after(jiffies, timeout))
if (time_after(jiffies, timeout)) {
/* Some cables have EEPROMs that conflict with the
* PHY's on-board EEPROM so it cannot load firmware */
EFX_ERR(efx, "If an SFP+ direct attach cable is"
" connected, please check that it complies"
" with the SFP+ specification\n");
return -ETIMEDOUT;
msleep(10);
}
msleep(QT2025C_HEARTB_WAIT);
}
return 0;
}
static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
int reg;
/* Wait for firmware status to look good */
for (;;) {
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
msleep(QT2025C_FWSTART_WAIT);
}
return 0;
}
static void qt2025c_restart_firmware(struct efx_nic *efx)
{
/* Restart microcontroller execution of firmware from RAM */
efx_mdio_write(efx, 3, 0xe854, 0x00c0);
efx_mdio_write(efx, 3, 0xe854, 0x0040);
msleep(50);
}
static int qt2025c_wait_reset(struct efx_nic *efx)
{
int rc;
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
return rc;
rc = qt2025c_wait_fw_status_good(efx);
if (rc == -ETIMEDOUT) {
/* Bug 17689: occasionally heartbeat starts but firmware status
* code never progresses beyond 0x00. Try again, once, after
* restarting execution of the firmware image. */
EFX_LOG(efx, "bashing QT2025C microcontroller\n");
qt2025c_restart_firmware(efx);
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
return rc;
rc = qt2025c_wait_fw_status_good(efx);
}
return rc;
}
static void qt2025c_firmware_id(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
u8 firmware_id[9];
size_t i;
for (i = 0; i < sizeof(firmware_id); i++)
firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
PCS_FW_PRODUCT_CODE_1 + i);
EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
(firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
firmware_id[3] >> 4, firmware_id[3] & 0xf,
firmware_id[4], firmware_id[5],
firmware_id[6], firmware_id[7], firmware_id[8]);
phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
((firmware_id[3] & 0x0f) << 16) |
(firmware_id[4] << 8) | firmware_id[5];
}
static void qt2025c_bug17190_workaround(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
/* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
* layers up, but PCS down (no block_lock). If we notice this state
* persisting for a couple of seconds, we switch PMA/PMD loopback
* briefly on and then off again, which is normally sufficient to
* recover it.
*/
if (efx->link_state.up ||
!efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
phy_data->bug17190_in_bad_state = false;
return;
}
if (!phy_data->bug17190_in_bad_state) {
phy_data->bug17190_in_bad_state = true;
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
return;
}
if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, false);
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
}
}
static int qt2025c_select_phy_mode(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
struct falcon_board *board = falcon_board(efx);
int reg, rc, i;
uint16_t phy_op_mode;
/* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
* Self-Configure mode. Don't attempt any switching if we encounter
* older firmware. */
if (phy_data->firmware_ver < 0x02000100)
return 0;
/* In general we will get optimal behaviour in "SFP+ Self-Configure"
* mode; however, that powers down most of the PHY when no module is
* present, so we must use a different mode (any fixed mode will do)
* to be sure that loopbacks will work. */
phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
/* Only change mode if really necessary */
reg = efx_mdio_read(efx, 1, 0xc319);
if ((reg & 0x0038) == phy_op_mode)
return 0;
EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
/* This sequence replicates the register writes configured in the boot
* EEPROM (including the differences between board revisions), except
* that the operating mode is changed, and the PHY is prevented from
* unnecessarily reloading the main firmware image again. */
efx_mdio_write(efx, 1, 0xc300, 0x0000);
/* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
* STOPs onto the firmware/module I2C bus to reset it, varies across
* board revisions, as the bus is connected to different GPIO/LED
* outputs on the PHY.) */
if (board->major == 0 && board->minor < 2) {
efx_mdio_write(efx, 1, 0xc303, 0x4498);
for (i = 0; i < 9; i++) {
efx_mdio_write(efx, 1, 0xc303, 0x4488);
efx_mdio_write(efx, 1, 0xc303, 0x4480);
efx_mdio_write(efx, 1, 0xc303, 0x4490);
efx_mdio_write(efx, 1, 0xc303, 0x4498);
}
} else {
efx_mdio_write(efx, 1, 0xc303, 0x0920);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
for (i = 0; i < 9; i++) {
efx_mdio_write(efx, 1, 0xc303, 0x0900);
efx_mdio_write(efx, 1, 0xd008, 0x0005);
efx_mdio_write(efx, 1, 0xc303, 0x0920);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
}
efx_mdio_write(efx, 1, 0xc303, 0x4900);
}
efx_mdio_write(efx, 1, 0xc303, 0x4900);
efx_mdio_write(efx, 1, 0xc302, 0x0004);
efx_mdio_write(efx, 1, 0xc316, 0x0013);
efx_mdio_write(efx, 1, 0xc318, 0x0054);
efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
efx_mdio_write(efx, 1, 0xc31a, 0x0098);
efx_mdio_write(efx, 3, 0x0026, 0x0e00);
efx_mdio_write(efx, 3, 0x0027, 0x0013);
efx_mdio_write(efx, 3, 0x0028, 0xa528);
efx_mdio_write(efx, 1, 0xd006, 0x000a);
efx_mdio_write(efx, 1, 0xd007, 0x0009);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
/* This additional write is not present in the boot EEPROM. It
* prevents the PHY's internal boot ROM doing another pointless (and
* slow) reload of the firmware image (the microcontroller's code
* memory is not affected by the microcontroller reset). */
efx_mdio_write(efx, 1, 0xc317, 0x00ff);
efx_mdio_write(efx, 1, 0xc300, 0x0002);
msleep(20);
/* Restart microcontroller execution of firmware from RAM */
qt2025c_restart_firmware(efx);
/* Wait for the microcontroller to be ready again */
rc = qt2025c_wait_reset(efx);
if (rc < 0) {
EFX_ERR(efx, "PHY microcontroller reset during mode switch "
"timed out\n");
return rc;
}
return 0;
@ -137,6 +335,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
static int qt202x_phy_probe(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data;
phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
phy_data->bug17190_in_bad_state = false;
phy_data->bug17190_timer = 0;
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@ -145,7 +353,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
static int qt202x_phy_init(struct efx_nic *efx)
{
struct qt202x_phy_data *phy_data;
u32 devid;
int rc;
@ -155,17 +362,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
return rc;
}
phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
efx_mdio_id_rev(devid));
phy_data->phy_mode = efx->phy_mode;
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_firmware_id(efx);
return 0;
}
@ -183,6 +387,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_bug17190_workaround(efx);
return efx->link_state.up != was_up;
}
@ -191,6 +398,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
struct qt202x_phy_data *phy_data = efx->phy_data;
if (efx->phy_type == PHY_TYPE_QT2025C) {
int rc = qt2025c_select_phy_mode(efx);
if (rc)
return rc;
/* There are several different register bits which can
* disable TX (and save power) on direct-attach cables
* or optical transceivers, varying somewhat between
@ -224,7 +435,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
static void qt202x_phy_fini(struct efx_nic *efx)
static void qt202x_phy_remove(struct efx_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
@ -236,7 +447,8 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
.fini = qt202x_phy_fini,
.fini = efx_port_dummy_op_void,
.remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
};


@ -133,6 +133,7 @@ static int siena_probe_port(struct efx_nic *efx)
void siena_remove_port(struct efx_nic *efx)
{
efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}


@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
int rc;
rtnl_lock();
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
MDIO_PMA_10GBT_TXPWR_SHORT,
count != 0 && *buf != '0');
rc = efx_reconfigure_port(efx);
if (efx->state != STATE_RUNNING) {
rc = -EBUSY;
} else {
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
MDIO_PMA_10GBT_TXPWR_SHORT,
count != 0 && *buf != '0');
rc = efx_reconfigure_port(efx);
}
rtnl_unlock();
return rc < 0 ? rc : (ssize_t)count;
@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx)
return 0;
}
static int sfx7101_phy_probe(struct efx_nic *efx)
{
efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
return 0;
}
static int sft9001_phy_probe(struct efx_nic *efx)
{
efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
FALCON_GMAC_LOOPBACKS);
return 0;
}
static int tenxpress_phy_init(struct efx_nic *efx)
static int tenxpress_phy_probe(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data;
int rc = 0;
falcon_board(efx)->type->init_phy(efx);
int rc;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
/* Create any special files */
if (efx->phy_type == PHY_TYPE_SFT9001B) {
rc = device_create_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
if (rc)
goto fail;
}
if (efx->phy_type == PHY_TYPE_SFX7101) {
efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45;
efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
ADVERTISED_10000baseT_Full);
} else {
efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = (SFT9001_LOOPBACKS |
FALCON_XMAC_LOOPBACKS |
FALCON_GMAC_LOOPBACKS);
efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full |
ADVERTISED_100baseT_Full);
}
return 0;
fail:
kfree(efx->phy_data);
efx->phy_data = NULL;
return rc;
}
static int tenxpress_phy_init(struct efx_nic *efx)
{
int rc;
falcon_board(efx)->type->init_phy(efx);
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
if (efx->phy_type == PHY_TYPE_SFT9001A) {
int reg;
@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto fail;
return rc;
rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
goto fail;
return rc;
}
rc = tenxpress_init(efx);
if (rc < 0)
goto fail;
return rc;
/* Initialise advertising flags */
efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
ADVERTISED_10000baseT_Full);
if (efx->phy_type != PHY_TYPE_SFX7101)
efx->link_advertising |= (ADVERTISED_1000baseT_Full |
ADVERTISED_100baseT_Full);
/* Reinitialise flow control settings */
efx_link_set_wanted_fc(efx, efx->wanted_fc);
efx_mdio_an_reconfigure(efx);
if (efx->phy_type == PHY_TYPE_SFT9001B) {
rc = device_create_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
if (rc)
goto fail;
}
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
/* Let XGXS and SerDes out of reset */
falcon_reset_xaui(efx);
return 0;
fail:
kfree(efx->phy_data);
efx->phy_data = NULL;
return rc;
}
/* Perform a "special software reset" on the PHY. The caller is
@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
static void tenxpress_phy_fini(struct efx_nic *efx)
static void sfx7101_phy_fini(struct efx_nic *efx)
{
int reg;
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
/* Waiting here ensures that the board fini, which can turn
* off the power to the PHY, won't get run until the LNPGA
* powerdown has been given long enough to complete. */
schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
static void tenxpress_phy_remove(struct efx_nic *efx)
{
if (efx->phy_type == PHY_TYPE_SFT9001B)
device_remove_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
if (efx->phy_type == PHY_TYPE_SFX7101) {
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
/* Waiting here ensures that the board fini, which can turn
* off the power to the PHY, won't get run until the LNPGA
* powerdown has been given long enough to complete. */
schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
kfree(efx->phy_data);
efx->phy_data = NULL;
}
@ -819,11 +833,12 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
.probe = sfx7101_phy_probe,
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.fini = sfx7101_phy_fini,
.remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
@ -832,11 +847,12 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
};
struct efx_phy_operations falcon_sft9001_phy_ops = {
.probe = sft9001_phy_probe,
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.fini = efx_port_dummy_op_void,
.remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,


@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
EFX_TXQ_MASK];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
buffer->len = 0;
buffer->continuation = true;
if (buffer->unmap_len) {
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
}
buffer->len = 0;
buffer->continuation = true;
}
}


@ -849,13 +849,13 @@ static void tun_sock_write_space(struct sock *sk)
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
wake_up_interruptible_sync(sk->sk_sleep);
tun = container_of(sk, struct tun_sock, sk)->tun;
tun = tun_sk(sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
static void tun_sock_destruct(struct sock *sk)
{
free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
free_netdev(tun_sk(sk)->tun->dev);
}
static struct proto tun_proto = {
@ -990,7 +990,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
container_of(sk, struct tun_sock, sk)->tun = tun;
tun_sk(sk)->tun = tun;
security_tun_dev_post_create(sk);
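
The tun hunks above swap the open-coded container_of(sk, struct tun_sock, sk)->tun for a tun_sk(sk) accessor whose definition is not shown in this excerpt. The standalone sketch below shows the container_of pattern such an accessor presumably relies on; the struct layouts and the simplified container_of macro are local stand-ins, not the driver's or the kernel's exact definitions.

/* Standalone sketch of the container_of accessor pattern (hypothetical types). */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sock { int refcnt; };			/* stand-in for the real struct sock */
struct tun_struct { const char *name; };	/* stand-in for the real tun_struct */

struct tun_sock {
	struct sock sk;				/* embedded member */
	struct tun_struct *tun;
};

static struct tun_sock *tun_sk(struct sock *sk)
{
	return container_of(sk, struct tun_sock, sk);
}

int main(void)
{
	struct tun_struct tun = { .name = "tun0" };
	struct tun_sock ts = { .sk = { .refcnt = 1 }, .tun = &tun };

	/* Given only the embedded sock, recover the enclosing tun_sock. */
	printf("%s\n", tun_sk(&ts.sk)->tun->name);
	return 0;
}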


@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
/* Wait for and prevent any further xmits. */
/* Prevent any further xmits, plus detach the device. */
netif_device_detach(ugeth->ndev);
/* Wait for any current xmits to finish. */
netif_tx_disable(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
netif_tx_wake_all_queues(ugeth->ndev);
netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
@ -1648,25 +1651,28 @@ static void adjust_link(struct net_device *dev)
ugeth->oldspeed = phydev->speed;
}
/*
* To change the MAC configuration we need to disable the
* controller. To do so, we have to either grab ugeth->lock,
* which is a bad idea since 'graceful stop' commands might
* take quite a while, or we can quiesce driver's activity.
*/
ugeth_quiesce(ugeth);
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
out_be32(&ug_regs->maccfg2, tempval);
out_be32(&uf_regs->upsmr, upsmr);
ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
ugeth_activate(ugeth);
if (!ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 1;
}
if (new_state) {
/*
* To change the MAC configuration we need to disable
* the controller. To do so, we have to either grab
* ugeth->lock, which is a bad idea since 'graceful
* stop' commands might take quite a while, or we can
* quiesce driver's activity.
*/
ugeth_quiesce(ugeth);
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
out_be32(&ug_regs->maccfg2, tempval);
out_be32(&uf_regs->upsmr, upsmr);
ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
ugeth_activate(ugeth);
}
} else if (ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 0;
@ -3273,7 +3279,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
if (bd == ugeth->txBd[txQ]) /* queue empty? */
break;
dev->stats.tx_packets++;


@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32;
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
@ -389,6 +390,7 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
struct work_struct reset_task;
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
@ -407,6 +409,7 @@ struct rhine_private {
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
INIT_WORK(&rp->reset_task, rhine_reset_task);
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev)
return 0;
}
static void rhine_tx_timeout(struct net_device *dev)
static void rhine_reset_task(struct work_struct *work)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
"%4.4x, resetting...\n",
dev->name, ioread16(ioaddr + IntrStatus),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
struct rhine_private *rp = container_of(work, struct rhine_private,
reset_task);
struct net_device *dev = rp->dev;
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);
napi_disable(&rp->napi);
spin_lock(&rp->lock);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
free_tbufs(dev);
@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev)
rhine_chip_reset(dev);
init_registers(dev);
spin_unlock(&rp->lock);
spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
dev->trans_start = jiffies;
@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
"%4.4x, resetting...\n",
dev->name, ioread16(ioaddr + IntrStatus),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
schedule_work(&rp->reset_task);
}
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
spin_lock_irq(&rp->lock);
netif_stop_queue(dev);
napi_disable(&rp->napi);
cancel_work_sync(&rp->reset_task);
netif_stop_queue(dev);
spin_lock_irq(&rp->lock);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
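
The via-rhine hunks above move the full chip reset out of the tx_timeout handler and into a work item, with cancel_work_sync() in rhine_close() ensuring no reset is still queued when the interface goes down. The following is a minimal, self-contained module sketch of that deferral pattern only; every name in it is hypothetical and none of it comes from the driver.

/* Sketch: defer heavy work from a non-sleeping path to process context. */
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct reset_task;
};

static struct demo_priv demo;

static void demo_reset_task(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, reset_task);

	/* Runs in process context: safe to sleep, disable NAPI, take mutexes. */
	pr_info("demo: running deferred reset for %p\n", priv);
}

/* Stands in for a context that must not block, e.g. ndo_tx_timeout. */
static void demo_tx_timeout(void)
{
	schedule_work(&demo.reset_task);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.reset_task, demo_reset_task);
	demo_tx_timeout();
	return 0;
}

static void __exit demo_exit(void)
{
	/* Like rhine_close(): make sure no work is still queued or running. */
	cancel_work_sync(&demo.reset_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");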


@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 64bit DMA", __func__);
high_dma = 1;
if (pci_set_consistent_dma_mask(pdev,
0xffffffffffffffffULL)) {
DMA_BIT_MASK(64))) {
vxge_debug_init(VXGE_ERR,
"%s : unable to obtain 64bit DMA for "
"consistent allocations", __func__);
ret = -ENOMEM;
goto _exit1;
}
} else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 32bit DMA", __func__);
} else {
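
The vxge hunk above replaces the open-coded 0xffffffffffffffffULL and 0xffffffffUL masks with DMA_BIT_MASK(64) and DMA_BIT_MASK(32). For reference, the standalone sketch below shows what the macro evaluates to; the #define mirrors <linux/dma-mapping.h> but is reproduced from memory rather than quoted from this tree.

/* Standalone sketch of DMA_BIT_MASK() (definition reproduced from memory). */
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("DMA_BIT_MASK(32) = 0x%llx\n", DMA_BIT_MASK(32)); /* 0xffffffff */
	printf("DMA_BIT_MASK(64) = 0x%llx\n", DMA_BIT_MASK(64)); /* all 64 bits set */
	return 0;
}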


@ -1903,17 +1903,6 @@ accept:
rxs->noise = sc->ah->ah_noise_floor;
rxs->signal = rxs->noise + rs.rs_rssi;
/* An rssi of 35 indicates you should be able use
* 54 Mbps reliably. A more elaborate scheme can be used
* here but it requires a map of SNR/throughput for each
* possible mode used */
rxs->qual = rs.rs_rssi * 100 / 35;
/* rssi can be more than 35 though, anything above that
* should be considered at 100% */
if (rxs->qual > 100)
rxs->qual = 100;
rxs->antenna = rs.rs_antenna;
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@ -2381,6 +2370,9 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
/* Set PHY calibration interval */
ah->ah_cal_intval = ath5k_calinterval;
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@ -2408,10 +2400,6 @@ ath5k_init(struct ath5k_softc *sc)
/* Set ack to be sent at low bit-rates */
ath5k_hw_set_ack_bitrate_high(ah, false);
/* Set PHY calibration inteval */
ah->ah_cal_intval = ath5k_calinterval;
ret = 0;
done:
mmiowb();


@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
wait = wait_time;
while (ath9k_hw_numtxpending(ah, q)) {
if ((--wait) == 0) {
ath_print(common, ATH_DBG_QUEUE,
ath_print(common, ATH_DBG_FATAL,
"Failed to stop TX DMA in 100 "
"msec after killing last frame\n");
break;


@ -77,6 +77,9 @@
#define ATH9K_TXERR_XTXOP 0x08
#define ATH9K_TXERR_TIMER_EXPIRED 0x10
#define ATH9K_TX_ACKED 0x20
#define ATH9K_TXERR_MASK \
(ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
#define ATH9K_TX_BA 0x01
#define ATH9K_TX_PWRMGMT 0x02


@ -1973,6 +1973,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
struct ieee80211_hw *hw = sc->hw;
int r;
/* Stop ANI */
del_timer_sync(&common->ani.timer);
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@ -2014,6 +2017,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
/* Start ANI */
ath_start_ani(common);
return r;
}
@ -2508,6 +2514,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
return; /* another wiphy still in use */
}
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
if (ah->btcoex_hw.enabled) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@ -2528,6 +2537,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
ath9k_ps_restore(sc);
/* Finally, put the chip in FULL SLEEP mode */
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
sc->sc_flags |= SC_OP_INVALID;
@ -2641,8 +2653,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
ath9k_ps_wakeup(sc);
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
ath_beacon_return(sc, avp);
ath9k_ps_restore(sc);
}
sc->sc_flags &= ~SC_OP_BEACONS;
@ -3091,15 +3105,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
ath9k_ps_wakeup(sc);
ath_tx_aggr_start(sc, sta, tid, ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
ath9k_ps_wakeup(sc);
ath_tx_aggr_resume(sc, sta, tid);
ath9k_ps_restore(sc);
break;
default:
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,


@ -96,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
}
const static struct ath_bus_ops ath_pci_bus_ops = {
static const struct ath_bus_ops ath_pci_bus_ops = {
.read_cachesize = ath_pci_read_cachesize,
.cleanup = ath_pci_cleanup,
.eeprom_read = ath_pci_eeprom_read,


@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
if (npend) {
int r;
ath_print(common, ATH_DBG_XMIT,
ath_print(common, ATH_DBG_FATAL,
"Unable to stop TxDMA. Reset HAL!\n");
spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
if (r)
ath_print(common, ATH_DBG_FATAL,
"Unable to reset hardware; reset status %d\n",
@ -1414,17 +1414,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
* For HT capable stations, we save tidno for later use.
* We also override seqno set by upper layer with the one
* in tx aggregation state.
*
* If fragmentation is on, the sequence number is
* not overridden, since it has been
* incremented by the fragmentation routine.
*
* FIXME: check if the fragmentation threshold exceeds
* IEEE80211 max.
*/
tid = ATH_AN_2_TID(an, bf->bf_tidno);
hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
IEEE80211_SEQ_SEQ_SHIFT);
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
bf->bf_seqno = tid->seq_next;
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
@ -1636,7 +1628,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
}
if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
(sc->sc_flags & SC_OP_TXAGGR))
assign_aggr_tid_seqno(skb, bf);
bf->bf_mpdu = skb;
@ -1780,7 +1773,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int hdrlen, padsize;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
int padpos, padsize;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath_tx_control txctl;
@ -1792,7 +1786,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
* BSSes.
*/
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
sc->tx.seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@ -1800,9 +1793,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* Add the padding after the header if this is not already done */
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
if (hdrlen & 3) {
padsize = hdrlen % 4;
padpos = ath9k_cmn_padpos(hdr->frame_control);
padsize = padpos & 3;
if (padsize && skb->len>padpos) {
if (skb_headroom(skb) < padsize) {
ath_print(common, ATH_DBG_XMIT,
"TX CABQ padding failed\n");
@ -1810,7 +1803,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, hdrlen);
memmove(skb->data, skb->data + padsize, padpos);
}
txctl.txq = sc->beacon.cabq;
@ -1838,7 +1831,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int hdrlen, padsize;
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
int padpos, padsize;
ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
@ -1853,14 +1847,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
padsize = hdrlen & 3;
if (padsize && hdrlen >= 24) {
padpos = ath9k_cmn_padpos(hdr->frame_control);
padsize = padpos & 3;
if (padsize && skb->len>padpos+padsize) {
/*
* Remove MAC header padding before giving the frame back to
* mac80211.
*/
memmove(skb->data + padsize, skb->data, hdrlen);
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
@ -2078,7 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);


@ -383,160 +383,44 @@ static inline
}
}
/* Check if a DMA region fits the device constraints.
* Returns true, if the region is OK for usage with this device. */
static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
dma_addr_t addr, size_t size)
{
switch (ring->type) {
case B43_DMA_30BIT:
if ((u64)addr + size > (1ULL << 30))
return 0;
break;
case B43_DMA_32BIT:
if ((u64)addr + size > (1ULL << 32))
return 0;
break;
case B43_DMA_64BIT:
/* Currently we can't have addresses beyond
* 64bit in the kernel. */
break;
}
return 1;
}
#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
dma_addr_t dmaaddr, size_t size)
{
ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
free_pages((unsigned long)base, get_order(size));
}
static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
dma_addr_t *dmaaddr, size_t size,
gfp_t gfp_flags)
{
void *base;
base = (void *)__get_free_pages(gfp_flags, get_order(size));
if (!base)
return NULL;
memset(base, 0, size);
*dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
DMA_TO_DEVICE);
if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
free_pages((unsigned long)base, get_order(size));
return NULL;
}
return base;
}
static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
dma_addr_t *dmaaddr, size_t size)
{
void *base;
base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
GFP_KERNEL);
if (!base) {
b43err(ring->dev->wl, "Failed to allocate or map pages "
"for DMA ringmemory\n");
return NULL;
}
if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
/* The memory does not fit our device constraints.
* Retry with GFP_DMA set to get lower memory. */
b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
GFP_KERNEL | GFP_DMA);
if (!base) {
b43err(ring->dev->wl, "Failed to allocate or map pages "
"in the GFP_DMA region for DMA ringmemory\n");
return NULL;
}
if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
b43err(ring->dev->wl, "Failed to allocate DMA "
"ringmemory that fits device constraints\n");
return NULL;
}
}
/* We expect the memory to be 4k aligned, at least. */
if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
return NULL;
}
return base;
}
static int alloc_ringmemory(struct b43_dmaring *ring)
{
unsigned int required;
void *base;
dma_addr_t dmaaddr;
gfp_t flags = GFP_KERNEL;
/* There are several requirements to the descriptor ring memory:
* - The memory region needs to fit the address constraints for the
* device (same as for frame buffers).
* - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
* - For 64bit DMA devices, the descriptor ring must be 8k aligned.
/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
* alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
* has shown that 4K is sufficient for the latter as long as the buffer
* does not cross an 8K boundary.
*
* For unknown reasons - possibly a hardware error - the BCM4311 rev
* 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
* which accounts for the GFP_DMA flag below.
*
* The flags here must match the flags in free_ringmemory below!
*/
if (ring->type == B43_DMA_64BIT)
required = ring->nr_slots * sizeof(struct b43_dmadesc64);
else
required = ring->nr_slots * sizeof(struct b43_dmadesc32);
if (B43_WARN_ON(required > 0x1000))
flags |= GFP_DMA;
ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
B43_DMA_RINGMEMSIZE,
&(ring->dmabase), flags);
if (!ring->descbase) {
b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
ring->alloc_descsize = 0x1000;
base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
if (!base)
return -ENOMEM;
ring->alloc_descbase = base;
ring->alloc_dmabase = dmaaddr;
if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
/* We're on <=32bit DMA, or we already got 8k aligned memory.
* That's all we need, so we're fine. */
ring->descbase = base;
ring->dmabase = dmaaddr;
return 0;
}
b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
/* Ok, we failed at the 8k alignment requirement.
* Try to force-align the memory region now. */
ring->alloc_descsize = 0x2000;
base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
if (!base)
return -ENOMEM;
ring->alloc_descbase = base;
ring->alloc_dmabase = dmaaddr;
if (is_8k_aligned(dmaaddr)) {
/* We're already 8k aligned. That's OK, too. */
ring->descbase = base;
ring->dmabase = dmaaddr;
return 0;
}
/* Force-align it to 8k */
ring->descbase = (void *)((u8 *)base + 0x1000);
ring->dmabase = dmaaddr + 0x1000;
B43_WARN_ON(!is_8k_aligned(ring->dmabase));
memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
return 0;
}
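
The fallback above relies on a simple trick: because __get_free_pages() hands back page-aligned (4K) memory, allocating 0x2000 bytes guarantees an 8K boundary either at the start of the buffer or exactly 0x1000 bytes in, and descbase/dmabase are simply pointed at that boundary. A rough userspace sketch of the idea, using a generic round-up since malloc() promises no such alignment (names invented for the illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_SIZE  0x1000UL   /* 4K of descriptors */
#define RING_ALIGN 0x2000UL   /* required 8K alignment */

int main(void)
{
    /* Over-allocate by one alignment unit so an 8K-aligned address
     * is guaranteed to lie somewhere inside the buffer. */
    void *raw = malloc(DESC_SIZE + RING_ALIGN);
    uintptr_t addr, ring;

    if (!raw)
        return 1;
    addr = (uintptr_t)raw;
    ring = (addr + RING_ALIGN - 1) & ~(uintptr_t)(RING_ALIGN - 1);

    printf("raw %#lx, ring %#lx, offset %#lx\n",
           (unsigned long)addr, (unsigned long)ring,
           (unsigned long)(ring - addr));
    free(raw);
    return 0;
}
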
static void free_ringmemory(struct b43_dmaring *ring)
{
b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
ring->alloc_dmabase, ring->alloc_descsize);
gfp_t flags = GFP_KERNEL;
if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA;
ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
ring->descbase, ring->dmabase, flags);
}
/* Reset the RX DMA channel */
@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
return 1;
if (!b43_dma_address_ok(ring, addr, buffersize)) {
/* We can't support this address. Unmap it again. */
unmap_descbuffer(ring, addr, buffersize, dma_to_device);
return 1;
switch (ring->type) {
case B43_DMA_30BIT:
if ((u64)addr + buffersize > (1ULL << 30))
goto address_error;
break;
case B43_DMA_32BIT:
if ((u64)addr + buffersize > (1ULL << 32))
goto address_error;
break;
case B43_DMA_64BIT:
/* Currently we can't have addresses beyond
* 64bit in the kernel. */
break;
}
/* The address is OK. */
return 0;
address_error:
/* We can't support this address. Unmap it again. */
unmap_descbuffer(ring, addr, buffersize, dma_to_device);
return 1;
}
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
meta->dmaaddr = dmaaddr;
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
ssb_dma_sync_single_for_device(ring->dev->dev,
ring->alloc_dmabase,
ring->alloc_descsize, DMA_TO_DEVICE);
return 0;
}
@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
}
/* Now transfer the whole frame. */
wmb();
ssb_dma_sync_single_for_device(ring->dev->dev,
ring->alloc_dmabase,
ring->alloc_descsize, DMA_TO_DEVICE);
ops->poke_tx(ring, next_slot(ring, slot));
return 0;

View File

@ -157,6 +157,7 @@ struct b43_dmadesc_generic {
} __attribute__ ((__packed__));
/* Misc DMA constants */
#define B43_DMA_RINGMEMSIZE PAGE_SIZE
#define B43_DMA0_RX_FRAMEOFFSET 30
/* DMA engine tuning knobs */
@ -246,12 +247,6 @@ struct b43_dmaring {
/* The QOS priority assigned to this ring. Only used for TX rings.
* This is the mac80211 "queue" value. */
u8 queue_prio;
/* Pointers and size of the originally allocated and mapped memory
* region for the descriptor ring. */
void *alloc_descbase;
dma_addr_t alloc_dmabase;
unsigned int alloc_descsize;
/* Pointer to our wireless device. */
struct b43_wldev *dev;
#ifdef CONFIG_B43_DEBUG
/* Maximum number of used slots. */

View File

@ -681,19 +681,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
snr = rx_stats_sig_avg / rx_stats_noise_diff;
rx_status.noise = rx_status.signal -
iwl3945_calc_db_from_ratio(snr);
rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
rx_status.noise);
/* If noise info not available, calculate signal quality indicator (%)
* using just the dBm signal level. */
} else {
rx_status.noise = priv->last_rx_noise;
rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
}
IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
rx_status.signal, rx_status.noise, rx_status.qual,
IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
rx_status.signal, rx_status.noise,
rx_stats_sig_avg, rx_stats_noise_diff);
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@ -1835,8 +1829,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
rc = -EIO;
}
priv->alloc_rxb_page--;
free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
iwl_free_pages(priv, cmd.reply_page);
return rc;
}
@ -2836,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@ -2852,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
};
struct pci_device_id iwl3945_hw_card_ids[] = {

View File

@ -222,7 +222,6 @@ struct iwl3945_ibss_seq {
*
*****************************************************************************/
extern int iwl3945_calc_db_from_ratio(int sig_ratio);
extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,

View File

@ -1204,7 +1204,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
/* calculate tx gain adjustment based on power supply voltage */
voltage = priv->calib_info->voltage;
voltage = le16_to_cpu(priv->calib_info->voltage);
init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
voltage_compensation =
iwl4965_get_voltage_compensation(voltage, init_voltage);

View File

@ -92,11 +92,15 @@
static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
EEPROM_5000_TEMPERATURE);
/* offset = temperature - voltage / coef */
s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
return offset;
u16 temperature, voltage;
__le16 *temp_calib =
(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
temperature = le16_to_cpu(temp_calib[0]);
voltage = le16_to_cpu(temp_calib[1]);
/* offset = temp - volt / coeff */
return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
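
The corrected helper reads the two 16-bit EEPROM words as little-endian (__le16) and converts them with le16_to_cpu() before doing the offset arithmetic in CPU byte order. A self-contained sketch of the same conversion, with a byte-shift standing in for le16_to_cpu(); the coefficient and data values are placeholders, not the real 5150 calibration constants:

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_TO_TEMPERATURE_COEFF 9  /* placeholder value for the sketch */

/* Portable little-endian 16-bit read, the userspace stand-in for
 * le16_to_cpu() on a __le16 stored in EEPROM. */
static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
    /* Two EEPROM words as raw little-endian bytes:
     * temperature = 0x0123, voltage = 0x0456. */
    uint8_t eeprom[4] = { 0x23, 0x01, 0x56, 0x04 };
    int32_t temperature = get_le16(&eeprom[0]);
    int32_t voltage = get_le16(&eeprom[2]);

    /* offset = temperature - voltage / coefficient */
    int32_t offset = temperature - voltage / VOLTAGE_TO_TEMPERATURE_COEFF;
    printf("offset = %d\n", offset);
    return 0;
}
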
/* Fixed (non-configurable) rx data from phy */

View File

@ -333,14 +333,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
__le16 *xtal_calib =
(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
cmd.hdr.data_valid = 1;
cmd.cap_pin1 = (u8)xtal_calib[0];
cmd.cap_pin2 = (u8)xtal_calib[1];
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
(u8 *)&cmd, sizeof(cmd));
}

View File

@ -150,7 +150,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
};
/* mbps, mcs */
const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
{ "2", "QPSK DSSS"},
{"5.5", "BPSK CCK"},

View File

@ -1842,7 +1842,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
}
#ifdef CONFIG_IWLWIFI_DEBUG
if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
@ -3173,7 +3173,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->ibss_beacon = NULL;
spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@ -3361,10 +3360,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(unsigned long long) pci_resource_len(pdev, 0));
IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
/* this spin lock will be used in apm_ops.init and EEPROM access
/* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
iwl_hw_detect(priv);
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
priv->cfg->name, priv->hw_rev);

View File

@ -77,8 +77,7 @@
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
*
* NOTE: Newer devices using one-time-programmable (OTP) memory
* require device to be awake in order to read this memory
* NOTE: Device does need to be awake in order to read this memory
* via CSR_EEPROM and CSR_OTP registers
*/
#define CSR_BASE (0x000)
@ -111,9 +110,8 @@
/*
* EEPROM and OTP (one-time-programmable) memory reads
*
* NOTE: For (newer) devices using OTP, device must be awake, initialized via
* apm_ops.init() in order to read. Older devices (3945/4965/5000)
* use EEPROM and do not require this.
* NOTE: Device must be awake, initialized via apm_ops.init(),
* in order to read.
*/
#define CSR_EEPROM_REG (CSR_BASE+0x02c)
#define CSR_EEPROM_GP (CSR_BASE+0x030)

View File

@ -1168,7 +1168,7 @@ struct iwl_priv {
u32 last_beacon_time;
u64 last_tsf;
/* eeprom */
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
struct iwl_eeprom_calib_info *calib_info;
@ -1353,4 +1353,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
}
static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
{
__free_pages(page, priv->hw_params.rx_page_order);
priv->alloc_rxb_page--;
}
static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
{
free_pages(page, priv->hw_params.rx_page_order);
priv->alloc_rxb_page--;
}
#endif /* __iwl_dev_h__ */

View File

@ -370,7 +370,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
return ret;
}
static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
@ -404,7 +404,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
}
*eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
*eeprom_data = cpu_to_le16(r >> 16);
return 0;
}
@ -413,7 +413,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
*/
static bool iwl_is_otp_empty(struct iwl_priv *priv)
{
u16 next_link_addr = 0, link_value;
u16 next_link_addr = 0;
__le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
@ -443,7 +444,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
static int iwl_find_otp_image(struct iwl_priv *priv,
u16 *validblockaddr)
{
u16 next_link_addr = 0, link_value = 0, valid_addr;
u16 next_link_addr = 0, valid_addr;
__le16 link_value = 0;
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* check for more blocks on the link list
*/
valid_addr = next_link_addr;
next_link_addr = link_value * sizeof(u16);
next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
if (iwl_read_otp_word(priv, next_link_addr, &link_value))
@ -497,7 +499,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
int iwl_eeprom_init(struct iwl_priv *priv)
{
u16 *e;
__le16 *e;
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
int sz;
int ret;
@ -516,12 +518,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
ret = -ENOMEM;
goto alloc_err;
}
e = (u16 *)priv->eeprom;
e = (__le16 *)priv->eeprom;
if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
/* OTP reads require powered-up chip */
priv->cfg->ops->lib->apm_ops.init(priv);
}
priv->cfg->ops->lib->apm_ops.init(priv);
ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
if (ret < 0) {
@ -562,7 +561,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
}
for (addr = validblockaddr; addr < validblockaddr + sz;
addr += sizeof(u16)) {
u16 eeprom_data;
__le16 eeprom_data;
ret = iwl_read_otp_word(priv, addr, &eeprom_data);
if (ret)
@ -570,13 +569,6 @@ int iwl_eeprom_init(struct iwl_priv *priv)
e[cache_addr / 2] = eeprom_data;
cache_addr += sizeof(u16);
}
/*
* Now that OTP reads are complete, reset chip to save
* power until we load uCode during "up".
*/
priv->cfg->ops->lib->apm_ops.stop(priv);
} else {
/* eeprom is an array of 16bit values */
for (addr = 0; addr < sz; addr += sizeof(u16)) {
@ -594,7 +586,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
goto done;
}
r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
ret = 0;
@ -603,6 +595,8 @@ done:
err:
if (ret)
iwl_eeprom_free(priv);
/* Reset chip to save power until we load uCode during "up". */
priv->cfg->ops->lib->apm_ops.stop(priv);
alloc_err:
return ret;
}
@ -755,7 +749,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
ch_info->ht40_eeprom = *eeprom_ch;
ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
ch_info->ht40_flags = eeprom_ch->flags;
ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
return 0;
}

View File

@ -137,7 +137,7 @@ struct iwl_eeprom_channel {
*
*/
struct iwl_eeprom_enhanced_txpwr {
u16 common;
__le16 common;
s8 chain_a_max;
s8 chain_b_max;
s8 chain_c_max;
@ -360,7 +360,7 @@ struct iwl_eeprom_calib_subband_info {
struct iwl_eeprom_calib_info {
u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
u8 saturation_power52; /* half-dBm */
s16 voltage; /* signed */
__le16 voltage; /* signed */
struct iwl_eeprom_calib_subband_info
band_info[EEPROM_TX_POWER_BANDS];
} __attribute__ ((packed));

View File

@ -234,7 +234,7 @@ cancel:
}
fail:
if (cmd->reply_page) {
free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
iwl_free_pages(priv, cmd->reply_page);
cmd->reply_page = 0;
}
out:

View File

@ -345,10 +345,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
__free_pages(rxq->pool[i].page,
priv->hw_params.rx_page_order);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
priv->alloc_rxb_page--;
}
}
@ -416,9 +414,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
priv->alloc_rxb_page--;
__free_pages(rxq->pool[i].page,
priv->hw_params.rx_page_order);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@ -654,47 +650,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
#define PERFECT_RSSI (-20) /* dBm */
#define WORST_RSSI (-95) /* dBm */
#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
/* Calculate an indication of rx signal quality (a percentage, not dBm!).
* See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
* about formulas used below. */
static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
{
int sig_qual;
int degradation = PERFECT_RSSI - rssi_dbm;
/* If we get a noise measurement, use signal-to-noise ratio (SNR)
* as indicator; formula is (signal dbm - noise dbm).
* SNR at or above 40 is a great signal (100%).
* Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
* Weakest usable signal is usually 10 - 15 dB SNR. */
if (noise_dbm) {
if (rssi_dbm - noise_dbm >= 40)
return 100;
else if (rssi_dbm < noise_dbm)
return 0;
sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
/* Else use just the signal level.
* This formula is a least squares fit of data points collected and
* compared with a reference system that had a percentage (%) display
* for signal quality. */
} else
sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
(15 * RSSI_RANGE + 62 * degradation)) /
(RSSI_RANGE * RSSI_RANGE);
if (sig_qual > 100)
sig_qual = 100;
else if (sig_qual < 1)
sig_qual = 0;
return sig_qual;
}
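
For a concrete reading of the SNR branch in the removed helper: rssi_dbm = -60 and noise_dbm = -90 give an SNR of 30 dB, so sig_qual = (30 * 5) / 2 = 75%, while any SNR of 40 dB or better reported a flat 100%.
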
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwl_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
@ -1105,11 +1060,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
if (iwl_is_associated(priv) &&
!test_bit(STATUS_SCANNING, &priv->status)) {
rx_status.noise = priv->last_rx_noise;
rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
rx_status.noise);
} else {
rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
}
/* Reset beacon noise level if not associated. */
@ -1122,8 +1074,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
iwl_dbg_report_frame(priv, phy_res, len, header, 1);
#endif
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
rx_status.signal, rx_status.noise, rx_status.qual,
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
rx_status.signal, rx_status.noise,
(unsigned long long)rx_status.mactime);
/*

View File

@ -144,8 +144,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
clear_bit(STATUS_SCAN_HW, &priv->status);
}
priv->alloc_rxb_page--;
free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
iwl_free_pages(priv, cmd.reply_page);
return ret;
}

View File

@ -164,9 +164,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
break;
}
}
priv->alloc_rxb_page--;
free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
iwl_free_pages(priv, cmd.reply_page);
return ret;
}
@ -391,9 +389,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
break;
}
}
priv->alloc_rxb_page--;
free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
iwl_free_pages(priv, cmd.reply_page);
return ret;
}

View File

@ -407,13 +407,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
int txq_id;
/* Tx queues */
if (priv->txq)
if (priv->txq) {
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
txq_id++)
if (txq_id == IWL_CMD_QUEUE_NUM)
iwl_cmd_queue_free(priv);
else
iwl_tx_queue_free(priv, txq_id);
}
iwl_free_dma_ptr(priv, &priv->kw);
iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);

View File

@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq = &priv->txq[txq_id];
q = &txq->q;
if ((iwl_queue_space(q) < q->high_mark))
goto drop;
spin_lock_irqsave(&priv->lock, flags);
idx = get_cmd_index(q, q->write_ptr, 0);
@ -812,7 +815,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
break;
}
free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
iwl_free_pages(priv, cmd.reply_page);
return rc;
}
@ -1198,9 +1201,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
priv->alloc_rxb_page--;
__free_pages(rxq->pool[i].page,
priv->hw_params.rx_page_order);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@ -1247,10 +1248,8 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
__free_pages(rxq->pool[i].page,
priv->hw_params.rx_page_order);
__iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
priv->alloc_rxb_page--;
}
}
@ -1300,47 +1299,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
return (int)ratio2dB[sig_ratio];
}
#define PERFECT_RSSI (-20) /* dBm */
#define WORST_RSSI (-95) /* dBm */
#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
/* Calculate an indication of rx signal quality (a percentage, not dBm!).
* See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
* about formulas used below. */
int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
{
int sig_qual;
int degradation = PERFECT_RSSI - rssi_dbm;
/* If we get a noise measurement, use signal-to-noise ratio (SNR)
* as indicator; formula is (signal dbm - noise dbm).
* SNR at or above 40 is a great signal (100%).
* Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
* Weakest usable signal is usually 10 - 15 dB SNR. */
if (noise_dbm) {
if (rssi_dbm - noise_dbm >= 40)
return 100;
else if (rssi_dbm < noise_dbm)
return 0;
sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
/* Else use just the signal level.
* This formula is a least squares fit of data points collected and
* compared with a reference system that had a percentage (%) display
* for signal quality. */
} else
sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
(15 * RSSI_RANGE + 62 * degradation)) /
(RSSI_RANGE * RSSI_RANGE);
if (sig_qual > 100)
sig_qual = 100;
else if (sig_qual < 1)
sig_qual = 0;
return sig_qual;
}
/**
* iwl3945_rx_handle - Main entry function for receiving responses from uCode
*
@ -1688,7 +1646,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
}
#ifdef CONFIG_IWLWIFI_DEBUG
if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
#else
@ -3867,7 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->retry_rate = 1;
priv->ibss_beacon = NULL;
spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@ -3936,9 +3893,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
IEEE80211_HW_SPECTRUM_MGMT;
if (!priv->cfg->broken_powersave)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
@ -4057,10 +4016,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, 0x41, 0x00);
/* this spin lock will be used in apm_ops.init and EEPROM access
/* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/***********************
* 4. Read EEPROM

View File

@ -268,7 +268,7 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
struct list_head rx_packets[IWM_RX_ID_HASH];
struct list_head rx_packets[IWM_RX_ID_HASH + 1];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
int iwm_down(struct iwm_priv *iwm);
/* TX API */
u16 iwm_tid_to_queue(u16 tid);
int iwm_tid_to_queue(u16 tid);
void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
void iwm_tx_worker(struct work_struct *work);
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);

View File

@ -76,7 +76,7 @@ static int iwm_stop(struct net_device *ndev)
*/
static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
u16 iwm_tid_to_queue(u16 tid)
int iwm_tid_to_queue(u16 tid)
{
if (tid > IWM_UMAC_TID_NR - 2)
return -EINVAL;

View File

@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
if (!stop) {
struct iwm_tx_queue *txq;
u16 queue = iwm_tid_to_queue(bit);
int queue = iwm_tid_to_queue(bit);
if (queue < 0)
continue;
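
The switch from u16 to int matters because a negative errno stored in an unsigned 16-bit variable wraps to a large positive number, so the queue < 0 test above can never fire. A minimal demonstration of the pitfall, independent of the iwm driver internals:

#include <errno.h>
#include <stdio.h>

/* Pretend TID lookup that fails for out-of-range values. */
static int tid_to_queue(unsigned int tid)
{
    if (tid > 6)
        return -EINVAL;
    return tid / 2;
}

int main(void)
{
    unsigned short q_u16 = tid_to_queue(7);  /* -EINVAL wraps to 65514 */
    int q_int = tid_to_queue(7);

    printf("u16 result %u, negative? %s\n", q_u16, q_u16 < 0 ? "yes" : "no");
    printf("int result %d, negative? %s\n", q_int, q_int < 0 ? "yes" : "no");
    return 0;
}
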

View File

@ -2,6 +2,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
@ -351,8 +352,7 @@ int lbs_add_mesh(struct lbs_private *priv)
mesh_dev->netdev_ops = &mesh_netdev_ops;
mesh_dev->ethtool_ops = &lbs_ethtool_ops;
memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
sizeof(priv->dev->dev_addr));
memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
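
The ETH_ALEN change fixes a quiet over-copy: dev_addr in struct net_device of this era is a MAX_ADDR_LEN (32-byte) array, so sizeof() copies 32 bytes instead of the 6-byte Ethernet address. A small stand-alone illustration with a stand-in structure:

#include <stdio.h>
#include <string.h>

#define MAX_ADDR_LEN 32
#define ETH_ALEN 6

/* Stand-in for the relevant part of struct net_device. */
struct fake_netdev {
    unsigned char dev_addr[MAX_ADDR_LEN];
};

int main(void)
{
    struct fake_netdev src = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
    unsigned char dst[MAX_ADDR_LEN] = { 0 };

    printf("sizeof(src.dev_addr) = %zu, ETH_ALEN = %d\n",
           sizeof(src.dev_addr), ETH_ALEN);

    /* Copy only the hardware address, not the whole backing array. */
    memcpy(dst, src.dev_addr, ETH_ALEN);
    printf("copied %d bytes: %02x:%02x:%02x:%02x:%02x:%02x\n",
           ETH_ALEN, dst[0], dst[1], dst[2], dst[3], dst[4], dst[5]);
    return 0;
}
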

View File

@ -567,11 +567,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
chan_count = lbs_scan_create_channel_list(priv, chan_list);
netif_stop_queue(priv->dev);
netif_carrier_off(priv->dev);
if (priv->mesh_dev) {
if (priv->mesh_dev)
netif_stop_queue(priv->mesh_dev);
netif_carrier_off(priv->mesh_dev);
}
/* Prepare to continue an interrupted scan */
lbs_deb_scan("chan_count %d, scan_channel %d\n",
@ -635,16 +632,13 @@ out2:
priv->scan_channel = 0;
out:
if (priv->connect_status == LBS_CONNECTED) {
netif_carrier_on(priv->dev);
if (!priv->tx_pending_len)
netif_wake_queue(priv->dev);
}
if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) {
netif_carrier_on(priv->mesh_dev);
if (!priv->tx_pending_len)
netif_wake_queue(priv->mesh_dev);
}
if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
netif_wake_queue(priv->dev);
if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
!priv->tx_pending_len)
netif_wake_queue(priv->mesh_dev);
kfree(chan_list);
lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);

View File

@ -2025,10 +2025,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
if (priv->connect_status == LBS_CONNECTED) {
memcpy(extra, priv->curbssparams.ssid,
priv->curbssparams.ssid_len);
extra[priv->curbssparams.ssid_len] = '\0';
} else {
memset(extra, 0, 32);
extra[priv->curbssparams.ssid_len] = '\0';
}
/*
* If none, we may want to get the one that was set

View File

@ -495,7 +495,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
stats.noise = prxpd->nf;
stats.qual = prxpd->snr - prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;

View File

@ -23,7 +23,7 @@
#define MAX_RID_LEN 1024
/* Helper routine to record keys
* Do not call from interrupt context */
* It is called under orinoco_lock so it may not sleep */
static int orinoco_set_key(struct orinoco_private *priv, int index,
enum orinoco_alg alg, const u8 *key, int key_len,
const u8 *seq, int seq_len)
@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
kzfree(priv->keys[index].seq);
if (key_len) {
priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
if (!priv->keys[index].key)
goto nomem;
} else
priv->keys[index].key = NULL;
if (seq_len) {
priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
if (!priv->keys[index].seq)
goto free_key;
} else

View File

@ -1323,7 +1323,7 @@
#define PAIRWISE_KEY_ENTRY(__idx) \
( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define MAC_IVEIV_ENTRY(__idx) \
( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) )
( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \

View File

@ -37,7 +37,7 @@
#include <linux/module.h>
#include "rt2x00.h"
#ifdef CONFIG_RT2800USB
#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
#include "rt2x00usb.h"
#endif
#include "rt2800lib.h"
@ -1121,7 +1121,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_intf_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
#ifdef CONFIG_RT2800USB
#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
@ -2021,6 +2021,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
u16 eeprom;
/*
* Disable powersaving by default on PCI devices.
*/
if (rt2x00_intf_is_pci(rt2x00dev))
rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
*/
@ -2074,8 +2080,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_TX_STBC |
IEEE80211_HT_CAP_RX_STBC |
IEEE80211_HT_CAP_PSMP_SUPPORT;
IEEE80211_HT_CAP_RX_STBC;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
@ -2140,8 +2145,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
}
static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)

View File

@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },

View File

@ -2538,6 +2538,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *tx_power;
unsigned int i;
/*
* Disable powersaving by default.
*/
rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
*/

View File

@ -132,7 +132,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.antenna = (flags2 >> 15) & 1;
/* TODO: improve signal/rssi reporting */
rx_status.qual = flags2 & 0xFF;
rx_status.signal = (flags2 >> 8) & 0x7F;
/* XXX: is this correct? */
rx_status.rate_idx = (flags >> 20) & 0xF;

View File

@ -256,7 +256,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
}
}
if (loop >= INIT_LOOP) {
if (loop > INIT_LOOP) {
wl1251_error("timeout waiting for the hardware to "
"complete initialization");
return -EIO;
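
Assuming the usual post-incrementing poll loop in wl1251_boot_run_firmware() (sketched from context, not quoted), the counter already equals INIT_LOOP when the final poll succeeds, so testing with >= reported a timeout one iteration too soon; only a genuine run-out leaves the counter above INIT_LOOP:

#include <stdio.h>

#define INIT_LOOP 3

/* Toy poll that only succeeds on its Nth call. */
static int ready_after(int n)
{
    static int calls;
    return ++calls >= n;
}

int main(void)
{
    int loop = 0;

    while (loop++ < INIT_LOOP) {
        if (ready_after(INIT_LOOP))   /* succeeds on the last pass */
            break;
    }

    /* Success on the final pass leaves loop == INIT_LOOP; only a
     * full run-out leaves it at INIT_LOOP + 1. */
    printf("loop = %d\n", loop);
    printf(">= says timeout: %d\n", loop >= INIT_LOOP);  /* 1 (wrong) */
    printf(">  says timeout: %d\n", loop > INIT_LOOP);   /* 0 (right) */
    return 0;
}
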

View File

@ -777,7 +777,7 @@ out:
return ret;
}
static int wl1271_build_basic_rates(char *rates, u8 band)
static int wl1271_build_basic_rates(u8 *rates, u8 band)
{
u8 index = 0;
@ -804,7 +804,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band)
return index;
}
static int wl1271_build_extended_rates(char *rates, u8 band)
static int wl1271_build_extended_rates(u8 *rates, u8 band)
{
u8 index = 0;

View File

@ -1325,151 +1325,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
return r;
}
static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size)
{
static const u16 constants[] = {
715, 655, 585, 540, 470, 410, 360, 315,
270, 235, 205, 175, 150, 125, 105, 85,
65, 50, 40, 25, 15
};
int i;
u32 x;
/* It seems that their quality parameter is somehow per signal
* and is now transferred per bit.
*/
switch (zd_rate) {
case ZD_OFDM_RATE_6M:
case ZD_OFDM_RATE_12M:
case ZD_OFDM_RATE_24M:
size *= 2;
break;
case ZD_OFDM_RATE_9M:
case ZD_OFDM_RATE_18M:
case ZD_OFDM_RATE_36M:
case ZD_OFDM_RATE_54M:
size *= 4;
size /= 3;
break;
case ZD_OFDM_RATE_48M:
size *= 3;
size /= 2;
break;
default:
return -EINVAL;
}
x = (10000 * status_quality)/size;
for (i = 0; i < ARRAY_SIZE(constants); i++) {
if (x > constants[i])
break;
}
switch (zd_rate) {
case ZD_OFDM_RATE_6M:
case ZD_OFDM_RATE_9M:
i += 3;
break;
case ZD_OFDM_RATE_12M:
case ZD_OFDM_RATE_18M:
i += 5;
break;
case ZD_OFDM_RATE_24M:
case ZD_OFDM_RATE_36M:
i += 9;
break;
case ZD_OFDM_RATE_48M:
case ZD_OFDM_RATE_54M:
i += 15;
break;
default:
return -EINVAL;
}
return i;
}
static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size)
{
int r;
r = ofdm_qual_db(status_quality, zd_rate, size);
ZD_ASSERT(r >= 0);
if (r < 0)
r = 0;
r = (r * 100)/29;
return r <= 100 ? r : 100;
}
static unsigned int log10times100(unsigned int x)
{
static const u8 log10[] = {
0,
0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
223, 223, 223, 224, 224, 224, 224,
};
return x < ARRAY_SIZE(log10) ? log10[x] : 225;
}
enum {
MAX_CCK_EVM_DB = 45,
};
static int cck_evm_db(u8 status_quality)
{
return (20 * log10times100(status_quality)) / 100;
}
static int cck_snr_db(u8 status_quality)
{
int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
ZD_ASSERT(r >= 0);
return r;
}
static int cck_qual_percent(u8 status_quality)
{
int r;
r = cck_snr_db(status_quality);
r = (100*r)/17;
return r <= 100 ? r : 100;
}
static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
{
return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
}
u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
const struct rx_status *status)
{
return (status->frame_status&ZD_RX_OFDM) ?
ofdm_qual_percent(status->signal_quality_ofdm,
zd_rate_from_ofdm_plcp_header(rx_frame),
size) :
cck_qual_percent(status->signal_quality_cck);
}
/**
* zd_rx_rate - report zd-rate
* @rx_frame - received frame

View File

@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
struct rx_status;
u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
const struct rx_status *status);
u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
struct zd_mc_hash {

View File

@ -828,9 +828,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = status->signal_strength;
stats.qual = zd_rx_qual_percent(buffer,
length - sizeof(struct rx_status),
status);
rate = zd_rx_rate(buffer, status);

View File

@ -832,7 +832,7 @@ struct ieee80211_ht_cap {
#define IEEE80211_HT_CAP_DELAY_BA 0x0400
#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
#define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000
#define IEEE80211_HT_CAP_RESERVED 0x2000
#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000

View File

@ -81,6 +81,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)

View File

@ -482,6 +482,7 @@ enum
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
NET_IPV4_CONF_ACCEPT_LOCAL=23,
NET_IPV4_CONF_SRC_VMARK=24,
__NET_IPV4_CONF_MAX
};

View File

@ -547,7 +547,6 @@ enum mac80211_rx_flags {
* unspecified depending on the hardware capabilities flags
* @IEEE80211_HW_SIGNAL_*
* @noise: noise when receiving this frame, in dBm.
* @qual: overall signal quality indication, in percent (0-100).
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT rates are used (RX_FLAG_HT)
@ -559,7 +558,6 @@ struct ieee80211_rx_status {
int freq;
int signal;
int noise;
int __deprecated qual;
int antenna;
int rate_idx;
int flag;
@ -1737,6 +1735,12 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
local_bh_enable();
}
/*
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
*/
#define IEEE80211_TX_STATUS_HEADROOM 13
/**
* ieee80211_tx_status - transmit status callback
*

View File

@ -250,8 +250,7 @@ struct pktgen_dev {
__u64 count; /* Default No packets to send */
__u64 sofar; /* How many pkts we've sent so far */
__u64 tx_bytes; /* How many bytes we've transmitted */
__u64 errors; /* Errors when trying to transmit,
pkts will be re-sent */
__u64 errors; /* Errors when trying to transmit */
/* runtime counters relating to clone_skb */
@ -3465,6 +3464,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->seq_num++;
pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
break;
case NET_XMIT_DROP:
case NET_XMIT_CN:
case NET_XMIT_POLICED:
/* skb has been consumed */
pkt_dev->errors++;
break;
default: /* Drivers are not supposed to return other values! */
if (net_ratelimit())
pr_info("pktgen: %s xmit error: %d\n",

View File

@ -1397,6 +1397,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
"accept_source_route"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),

View File

@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
no_addr = in_dev->ifa_list == NULL;
rpf = IN_DEV_RPFILTER(in_dev);
accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
if (mark && !IN_DEV_SRC_VMARK(in_dev))
fl.mark = 0;
}
rcu_read_unlock();

View File

@ -34,9 +34,28 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
ht_cap->ht_supported = true;
ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & sband->ht_cap.cap;
ht_cap->cap &= ~IEEE80211_HT_CAP_SM_PS;
ht_cap->cap |= sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS;
/*
* The bits listed in this expression should be
* the same for the peer and us; if the station
* advertises more, we can't use those, so
* we mask them out.
*/
ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) &
(sband->ht_cap.cap |
~(IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40));
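
The masking expression reads as: inside the set of bits that must match on both sides, intersect with our own capabilities; outside that set, keep whatever the peer advertised. A toy illustration with invented bit values (not the real IEEE80211_HT_CAP_* constants):

#include <stdio.h>

/* Invented example bits, standing in for the symmetric HT caps. */
#define CAP_SGI_20   0x01
#define CAP_SGI_40   0x02
#define CAP_GRN_FLD  0x04
#define SYMMETRIC    (CAP_SGI_20 | CAP_SGI_40 | CAP_GRN_FLD)
#define CAP_OTHER    0x10  /* a bit outside the symmetric set */

int main(void)
{
    unsigned int ours = CAP_SGI_20;             /* we only do SGI-20 */
    unsigned int peer = SYMMETRIC | CAP_OTHER;  /* peer claims everything */

    /* Same shape as the driver expression: peer & (ours | ~SYMMETRIC). */
    unsigned int used = peer & (ours | ~SYMMETRIC);

    /* 0x11: SGI-20 kept, SGI-40/GF dropped, the outside bit kept. */
    printf("used = 0x%x\n", used);
    return 0;
}
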
/*
* The STBC bits are asymmetric -- if we don't have
* TX then mask out the peer's RX and vice versa.
*/
if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC;
if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC))
ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
ampdu_info = ht_cap_ie->ampdu_params_info;
ht_cap->ampdu_factor =

View File

@ -382,6 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
u8 *bssid,u8 *addr, u32 supp_rates)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
int band = local->hw.conf.channel->band;
@ -397,6 +398,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
return NULL;
}
if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
return NULL;
if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
return NULL;

View File

@ -515,6 +515,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* and we need some headroom for passing the frame to monitor
* interfaces, but never both at the same time.
*/
BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
sizeof(struct ieee80211_tx_status_rtap_hdr));
local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
sizeof(struct ieee80211_tx_status_rtap_hdr));

View File

@ -915,6 +915,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
IEEE80211_STA_BEACON_POLL);
/*
* Always handle WMM once after association regardless
* of the first value the AP uses. Setting -1 here has
* that effect because the AP value is an unsigned
* 4-bit value.
*/
sdata->u.mgd.wmm_last_param_set = -1;
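
As a quick sanity check of the comment above, the parameter-set count is a 4-bit field (0..15), so an int initialized to -1 can never compare equal to it and the first beacon always triggers a WMM update:

#include <stdio.h>

int main(void)
{
    int last_param_set = -1;  /* sentinel, as in the mac80211 change */
    int count;

    for (count = 0; count <= 0x0f; count++)
        if (count == last_param_set)
            printf("unexpected match at %d\n", count);

    printf("no 4-bit count equals the -1 sentinel\n");
    return 0;
}
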
ieee80211_led_assoc(local, 1);
sdata->vif.bss_conf.assoc = 1;

View File

@ -1419,6 +1419,10 @@ static bool need_dynamic_ps(struct ieee80211_local *local)
if (!local->ps_sdata)
return false;
/* No point if we're going to suspend */
if (local->quiescing)
return false;
return true;
}

View File

@ -1039,7 +1039,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* restart hardware */
if (local->open_count) {
/*
* Upon resume hardware can sometimes be goofy due to
* various platform / driver / bus issues, so restarting
* the device may at times not work immediately. Propagate
* the error.
*/
res = drv_start(local);
if (res) {
WARN(local->suspended, "Hardware became unavailable "
"upon resume. This could be a software issue "
"prior to suspend or a hardware issue.\n");
return res;
}
ieee80211_led_radio(local, true);
}

View File

@ -93,7 +93,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
}
}
WARN_ON(!bss);
/*
* We might be coming here because the driver reported
* a successful association at the same time as the
* user requested a deauth. In that case, we will have
* removed the BSS from the auth_bsses list due to the
* deauth request when the assoc response makes it. If
* the two code paths acquire the lock the other way
* around, that's just the standard situation of a
* deauth being requested while connected.
*/
if (!bss)
goto out;
} else if (wdev->conn) {
cfg80211_sme_failed_assoc(wdev);
/*

View File

@ -601,7 +601,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
struct cfg80211_registered_device *rdev;
struct wiphy *wiphy;
struct iw_scan_req *wreq = NULL;
struct cfg80211_scan_request *creq;
struct cfg80211_scan_request *creq = NULL;
int i, err, n_channels = 0;
enum ieee80211_band band;
@ -694,8 +694,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
/* translate "Scan for SSID" request */
if (wreq) {
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
return -EINVAL;
if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out;
}
memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
creq->ssids[0].ssid_len = wreq->essid_len;
}
@ -707,12 +709,15 @@ int cfg80211_wext_siwscan(struct net_device *dev,
err = rdev->ops->scan(wiphy, dev, creq);
if (err) {
rdev->scan_req = NULL;
kfree(creq);
/* creq will be freed below */
} else {
nl80211_send_scan_start(rdev, dev);
/* creq now owned by driver */
creq = NULL;
dev_hold(dev);
}
out:
kfree(creq);
cfg80211_unlock_rdev(rdev);
return err;
}
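
The error-path fix follows a common ownership pattern: a single exit label frees the request, and the pointer is cleared at the exact point where ownership passes to the driver, so the later kfree() (like free()) quietly does nothing. A condensed, driver-free sketch of the shape with invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct scan_request { char ssid[32]; };

/* Stand-in for rdev->ops->scan(): on success the "driver" takes
 * ownership of the request and releases it itself. */
static int driver_scan(struct scan_request *req)
{
    if (req->ssid[0] == '\0')
        return -EINVAL;      /* rejected: caller keeps ownership */
    free(req);               /* accepted: driver now owns it */
    return 0;
}

static int do_scan(const char *ssid)
{
    struct scan_request *creq = NULL;
    int err;

    creq = calloc(1, sizeof(*creq));
    if (!creq)
        return -ENOMEM;

    if (strlen(ssid) >= sizeof(creq->ssid)) {
        err = -EINVAL;
        goto out;            /* freed below instead of leaking */
    }
    strcpy(creq->ssid, ssid);

    err = driver_scan(creq);
    if (!err)
        creq = NULL;         /* ownership transferred; don't free */
out:
    free(creq);              /* free(NULL) is a harmless no-op */
    return err;
}

int main(void)
{
    printf("%d %d\n", do_scan("homenet"), do_scan(""));
    return 0;
}
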

View File

@ -1445,7 +1445,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (!dev)
goto free_dst;
/* Copy neighbout for reachability confirmation */
/* Copy neighbour for reachability confirmation */
dst0->neighbour = neigh_clone(dst->neighbour);
xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);