author		David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
committer	David S. Miller <davem@davemloft.net>	2015-03-03 21:16:48 -0500
commit		71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c (patch)
tree		f74b6e4e48257ec6ce40b95645ecb8533b9cc1f8 /drivers/net
parent		b97526f3ff95f92b107f0fb52cbb8627e395429b (diff)
parent		a6c5170d1edea97c538c81e377e56c7b5c5b7e63 (diff)
download	linux-rt-71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/rocker/rocker.c

The rocker commit was two overlapping changes, one to rename the
->vport member to ->pport, and another making the bitmask expression
use '1ULL' instead of plain '1'.

Signed-off-by: David S. Miller <davem@davemloft.net>
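The '1ULL' detail in that conflict note matters because a plain '1' is a 32-bit
int, so shifting it by a port index of 32 or more truncates the mask (and is
undefined behaviour) on a 64-bit bitmap. A minimal, hypothetical sketch of the
difference -- port_bit() is an invented helper, not code from rocker.c:

	#include <stdint.h>

	/* Build a one-bit mask for a port index on a 64-bit port bitmap. */
	static inline uint64_t port_bit(unsigned int pport)
	{
		/* return 1 << pport;     broken: 32-bit shift, undefined once pport >= 32 */
		return 1ULL << pport;  /* correct: 64-bit shift covers pport 0..63 */
	}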
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig  2
-rw-r--r--  drivers/net/appletalk/Kconfig  2
-rw-r--r--  drivers/net/dsa/bcm_sf2.h  2
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c  7
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c  7
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c  47
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c  175
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c  7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h  2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  122
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c  57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h  6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  54
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c  4
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c  246
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c  24
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c  7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c  4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c  35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c  119
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h  1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c  143
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  9
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c  8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h  4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h  2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c  32
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  18
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c  6
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c  7
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c  9
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h  114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  10
-rw-r--r--  drivers/net/ethernet/sun/niu.c  6
-rw-r--r--  drivers/net/ethernet/ti/Kconfig  4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c  5
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c  2
-rw-r--r--  drivers/net/macvtap.c  7
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c  82
-rw-r--r--  drivers/net/phy/phy.c  23
-rw-r--r--  drivers/net/team/team.c  4
-rw-r--r--  drivers/net/usb/Kconfig  13
-rw-r--r--  drivers/net/usb/asix_devices.c  4
-rw-r--r--  drivers/net/usb/hso.c  2
-rw-r--r--  drivers/net/usb/plusb.c  5
-rw-r--r--  drivers/net/virtio_net.c  6
-rw-r--r--  drivers/net/wan/cosa.c  12
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c  5
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig  6
-rw-r--r--  drivers/net/xen-netback/netback.c  29
59 files changed, 985 insertions, 595 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 84673ebcf428..df51d6025a90 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -157,7 +157,7 @@ config IPVLAN
making it transparent to the connected L2 switch.
Ipvlan devices can be added using the "ip" command from the
- iproute2 package starting with the iproute2-X.Y.ZZ release:
+ iproute2 package starting with the iproute2-3.19 release:
"ip link add link <main-dev> [ NAME ] type ipvlan"
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 4ce6ca5f3d36..dc6b78e5342f 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -40,7 +40,7 @@ config DEV_APPLETALK
config LTPC
tristate "Apple/Farallon LocalTalk PC support"
- depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+ depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
help
This allows you to use the AppleTalk PC card to connect to LocalTalk
networks. The card is also known as the Farallon PhoneNet PC card.
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 0f217e99904f..22e2ebf31333 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -107,8 +107,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
{ \
u32 indir, dir; \
spin_lock(&priv->indir_lock); \
- indir = reg_readl(priv, REG_DIR_DATA_READ); \
dir = __raw_readl(priv->name + off); \
+ indir = reg_readl(priv, REG_DIR_DATA_READ); \
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
} \
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
link->open++;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ax_open(dev);
} /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
info->phy_id = info->eth_phy;
info->link_status = 0x00;
- init_timer(&info->watchdog);
- info->watchdog.function = ei_watchdog;
- info->watchdog.data = (u_long)dev;
- info->watchdog.expires = jiffies + HZ;
- add_timer(&info->watchdog);
+ setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ mod_timer(&info->watchdog, jiffies + HZ);
return ei_open(dev);
} /* pcnet_open */
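The two PCMCIA hunks above (axnet_cs and pcnet_cs) apply the same conversion:
the open-coded init_timer() sequence is collapsed into the setup_timer() and
mod_timer() helpers. A minimal sketch of the equivalence, assuming the
pre-4.15 timer API where the callback still takes an unsigned long data
argument:

	/* Open-coded form being removed */
	init_timer(&info->watchdog);
	info->watchdog.function = ei_watchdog;
	info->watchdog.data = (u_long)dev;
	info->watchdog.expires = jiffies + HZ;
	add_timer(&info->watchdog);

	/* Helper form being added: setup_timer() fills in function/data,
	 * mod_timer() sets expires and arms the timer in one call.
	 */
	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
	mod_timer(&info->watchdog, jiffies + HZ);

mod_timer() is also safe to call on a timer that is already pending, which
add_timer() is not, so the helper form is both shorter and more robust against
accidental double-arming.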
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index a1ee261bff5c..fd9296a5014d 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
u16 pktlength;
u16 pktstatus;
- while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+ while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+ (count < limit)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
int rxcomplete = 0;
- int txcomplete = 0;
unsigned long int flags;
- txcomplete = tse_tx_complete(priv);
+ tse_tx_complete(priv);
rxcomplete = tse_rx(priv, budget);
- if (rxcomplete >= budget || txcomplete > 0)
- return rxcomplete;
+ if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
- netdev_dbg(priv->dev,
- "NAPI Complete, did %d packets with budget %d\n",
- txcomplete+rxcomplete, budget);
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+ rxcomplete, budget);
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
- priv->dmaops->enable_rxirq(priv);
- priv->dmaops->enable_txirq(priv);
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- return rxcomplete + txcomplete;
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+ }
+ return rxcomplete;
}
/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct altera_tse_private *priv;
- unsigned long int flags;
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
}
priv = netdev_priv(dev);
- /* turn off desc irqs and enable napi rx */
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ spin_lock(&priv->rxdma_irq_lock);
+ /* reset IRQs */
+ priv->dmaops->clear_rxirq(priv);
+ priv->dmaops->clear_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
if (likely(napi_schedule_prep(&priv->napi))) {
+ spin_lock(&priv->rxdma_irq_lock);
priv->dmaops->disable_rxirq(priv);
priv->dmaops->disable_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
__napi_schedule(&priv->napi);
}
- /* reset IRQs */
- priv->dmaops->clear_rxirq(priv);
- priv->dmaops->clear_txirq(priv);
-
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
return IRQ_HANDLED;
}
@@ -1407,7 +1406,7 @@ static int altera_tse_probe(struct platform_device *pdev)
}
if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
- &priv->rx_fifo_depth)) {
+ &priv->tx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
ret = -ENXIO;
goto err_free_netdev;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
}
}
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
return -EINVAL;
}
- phy_stop(pdata->phydev);
-
spin_lock_irqsave(&pdata->lock, flags);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 0);
- /* Powerdown Tx/Rx */
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
+ xgbe_napi_disable(pdata, 0);
+
+ phy_stop(pdata->phydev);
+
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
phy_start(pdata->phydev);
- /* Enable Tx/Rx */
+ xgbe_napi_enable(pdata, 0);
+
hw_if->powerup_tx(pdata);
hw_if->powerup_rx(pdata);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_attach(netdev);
- xgbe_napi_enable(pdata, 0);
netif_tx_start_all_queues(netdev);
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct net_device *netdev = pdata->netdev;
+ int ret;
DBGPR("-->xgbe_start\n");
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
phy_start(pdata->phydev);
+ xgbe_napi_enable(pdata, 1);
+
+ ret = xgbe_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
xgbe_init_tx_timers(pdata);
- xgbe_napi_enable(pdata, 1);
netif_tx_start_all_queues(netdev);
DBGPR("<--xgbe_start\n");
return 0;
+
+err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
+ return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_stop\n");
- phy_stop(pdata->phydev);
-
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 1);
xgbe_stop_tx_timers(pdata);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int i;
-
DBGPR("-->xgbe_restart_dev\n");
/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
return;
xgbe_stop(pdata);
- synchronize_irq(pdata->dev_irq);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- synchronize_irq(channel->dma_irq);
- }
xgbe_free_tx_data(pdata);
xgbe_free_rx_data(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
xgbe_start(pdata);
DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
static int xgbe_open(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel = NULL;
- unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
- /* Request interrupts */
- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
- netdev->name, pdata);
- if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- pdata->dev_irq);
- goto err_rings;
- }
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- snprintf(channel->dma_irq_name,
- sizeof(channel->dma_irq_name) - 1,
- "%s-TxRx-%u", netdev_name(netdev),
- channel->queue_index);
-
- ret = devm_request_irq(pdata->dev, channel->dma_irq,
- xgbe_dma_isr, 0,
- channel->dma_irq_name, channel);
- if (ret) {
- netdev_alert(netdev,
- "error requesting irq %d\n",
- channel->dma_irq);
- goto err_irq;
- }
- }
- }
-
ret = xgbe_start(pdata);
if (ret)
- goto err_start;
+ goto err_rings;
DBGPR("<--xgbe_open\n");
return 0;
-err_start:
- hw_if->exit(pdata);
-
-err_irq:
- if (pdata->per_channel_irq) {
- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
err_rings:
desc_if->free_ring_resources(pdata);
@@ -1399,30 +1424,16 @@ err_phy_init:
static int xgbe_close(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- unsigned int i;
DBGPR("-->xgbe_close\n");
/* Stop the device */
xgbe_stop(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
/* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
- /* Release the interrupts */
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4a4d0e..783543ad1fcf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
- STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+ STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
s = &bcm_sysport_gstrings_stats[i];
switch (s->type) {
case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_SOFT:
continue;
case BCM_SYSPORT_STAT_MIB_RX:
case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417d82a5..7e3d87a88c76 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_RUNT,
BCM_SYSPORT_STAT_RXCHK,
BCM_SYSPORT_STAT_RBUF,
+ BCM_SYSPORT_STAT_SOFT,
};
/* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
#define STAT_RXCHK(str, m, ofs) { \
.stat_string = str, \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 51300532ec26..84feb241d60b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
+ BCMGENET_STAT_SOFT,
};
struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
#define STAT_GENET_MISC(str, m, offset) { \
.stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
UMAC_RBUF_OVFL_CNT),
STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
- STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
- STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
- STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+ STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+ STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_SOFT:
continue;
case BCMGENET_STAT_MIB_RX:
case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
}
/* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
+ unsigned int pkts_compl = 0;
unsigned int bds_compl;
unsigned int c_index;
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
tx_cb_ptr = ring->cbs + last_c_index;
bds_compl = 0;
if (tx_cb_ptr->skb) {
+ pkts_compl++;
bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
last_c_index &= (num_tx_bds - 1);
}
- if (ring->free_bds > (MAX_SKB_FRAGS + 1))
- ring->int_disable(priv, ring);
-
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+ }
ring->c_index = c_index;
+
+ return pkts_compl;
}
-static void bcmgenet_tx_reclaim(struct net_device *dev,
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ unsigned int released;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
- __bcmgenet_tx_reclaim(dev, ring);
+ released = __bcmgenet_tx_reclaim(dev, ring);
spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_tx_ring *ring =
+ container_of(napi, struct bcmgenet_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+ if (work_done == 0) {
+ napi_complete(napi);
+ ring->int_enable(ring->priv, ring);
+
+ return 0;
+ }
+
+ return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
- if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
- ring->int_enable(priv, ring);
- }
out:
spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
struct device *kdev = &priv->pdev->dev;
int ret;
u32 reg, cpu_mask_clear;
+ int index;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+ for (index = 0; index < priv->hw_params->tx_queues; index++)
+ bcmgenet_intrl2_1_writel(priv, (1 << index),
+ INTRL2_CPU_MASK_CLEAR);
+
/* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
@@ -1690,6 +1720,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
u32 flow_period_val = 0;
spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
ring->index = index;
if (index == DESC_INDEX) {
ring->queue = 0;
@@ -1732,6 +1764,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
TDMA_WRITE_PTR);
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
DMA_END_ADDR);
+
+ napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
@@ -1896,7 +1939,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
return ret;
}
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
@@ -1915,6 +1958,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
kfree(priv->tx_cbs);
}
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_fini_tx_ring(priv, i);
+
+ __bcmgenet_fini_dma(priv);
+}
+
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
@@ -1943,7 +1998,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
GFP_KERNEL);
if (!priv->tx_cbs) {
- bcmgenet_fini_dma(priv);
+ __bcmgenet_fini_dma(priv);
return -ENOMEM;
}
@@ -1965,9 +2020,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
struct bcmgenet_priv, napi);
unsigned int work_done;
- /* tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
work_done = bcmgenet_desc_rx(priv, budget);
/* Advancing our consumer index*/
@@ -2012,28 +2064,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
+ struct bcmgenet_tx_ring *ring;
unsigned int index;
/* Save irq status for bottom-half processing. */
priv->irq1_stat =
bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
- ~priv->int1_mask;
+ ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
/* Check the MBDONE interrupts.
* packet is done, reclaim descriptors
*/
- if (priv->irq1_stat & 0x0000ffff) {
- index = 0;
- for (index = 0; index < 16; index++) {
- if (priv->irq1_stat & (1 << index))
- bcmgenet_tx_reclaim(priv->dev,
- &priv->tx_rings[index]);
+ for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ if (!(priv->irq1_stat & BIT(index)))
+ continue;
+
+ ring = &priv->tx_rings[index];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
}
}
+
return IRQ_HANDLED;
}
@@ -2065,8 +2123,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
if (priv->irq0_stat &
(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
- /* Tx reclaim */
- bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+ if (likely(napi_schedule_prep(&ring->napi))) {
+ ring->int_disable(priv, ring);
+ __napi_schedule(&ring->napi);
+ }
}
if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
UMAC_IRQ_PHY_DET_F |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 3a8a90f95365..016bd12bf493 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
+ struct napi_struct napi; /* NAPI per tx queue */
unsigned int index; /* ring index */
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
struct bcmgenet_tx_ring *);
void (*int_disable)(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *);
+ struct bcmgenet_priv *priv;
};
/* device context */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a8434246..c308429dd9c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
}
static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
- int addr_len)
+ u8 v6)
{
- return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
- ipv6_clip_hash(ctbl, addr);
+ return v6 ? ipv6_clip_hash(ctbl, addr) :
+ ipv4_clip_hash(ctbl, addr);
}
static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
- int ret = 0;
+ int ret = -1;
if (!ctbl)
return 0;
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
-
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
spin_lock_init(&ce->lock);
atomic_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree);
- ce->addr_len = addr_len;
- memcpy(ce->addr, lip, addr_len);
list_add_tail(&ce->list, &ctbl->hash_list[hash]);
if (v6) {
+ ce->addr6.sin6_family = AF_INET6;
+ memcpy(ce->addr6.sin6_addr.s6_addr,
+ lip, sizeof(struct in6_addr));
ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
if (ret) {
write_unlock_bh(&ctbl->lock);
return ret;
}
+ } else {
+ ce->addr.sin_family = AF_INET;
+ memcpy((char *)(&ce->addr.sin_addr), lip,
+ sizeof(struct in_addr));
}
} else {
write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
struct clip_entry *ce, *cte;
u32 *addr = (u32 *)lip;
int hash;
- int addr_len;
-
- if (v6)
- addr_len = 16;
- else
- addr_len = 4;
+ int ret = -1;
- hash = clip_addr_hash(ctbl, addr, addr_len);
+ hash = clip_addr_hash(ctbl, addr, v6);
read_lock_bh(&ctbl->lock);
list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
- if (addr_len == cte->addr_len &&
- memcmp(lip, cte->addr, cte->addr_len) == 0) {
+ if (cte->addr6.sin6_family == AF_INET6 && v6)
+ ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+ sizeof(struct in6_addr));
+ else if (cte->addr.sin_family == AF_INET && !v6)
+ ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+ sizeof(struct in_addr));
+ if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
for (i = 0 ; i < ctbl->clipt_size; ++i) {
list_for_each_entry(ce, &ctbl->hash_list[i], list) {
ip[0] = '\0';
- if (ce->addr_len == 16)
- sprintf(ip, "%pI6c", ce->addr);
- else
- sprintf(ip, "%pI4c", ce->addr);
+ sprintf(ip, "%pISc", &ce->addr);
seq_printf(seq, "%-25s %u\n", ip,
atomic_read(&ce->refcnt));
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba0161cf8..35eb43c6bcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
spinlock_t lock; /* Hold while modifying clip reference */
atomic_t refcnt;
struct list_head list;
- u32 addr[4];
- int addr_len;
+ union {
+ struct sockaddr_in addr;
+ struct sockaddr_in6 addr6;
+ };
};
struct clip_tbl {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..97842d03675b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
- __be32 *buf, int dir);
+ void *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
u32 len, __be32 *buf)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..853c38997c82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
* @addr: address within indicated memory type
* @len: amount of memory to transfer
- * @buf: host memory buffer
+ * @hbuf: host memory buffer
* @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* caller's responsibility to perform appropriate byte order conversions.
*/
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
- u32 len, __be32 *buf, int dir)
+ u32 len, void *hbuf, int dir)
{
u32 pos, offset, resid, memoffset;
u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
/* Argument sanity checks ...
*/
- if (addr & 0x3)
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
return -EINVAL;
+ buf = (u32 *)hbuf;
/* It's convenient to be able to handle lengths which aren't a
* multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzel." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzels.
*/
while (len > 0) {
if (dir == T4_MEMORY_READ)
- *buf++ = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+ mem_base + offset));
else
t4_write_reg(adap, mem_base + offset,
- (__force u32) *buf++);
+ (__force u32)cpu_to_le32(*buf++));
offset += sizeof(__be32);
len -= sizeof(__be32);
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
*/
if (resid) {
union {
- __be32 word;
+ u32 word;
char byte[4];
} last;
unsigned char *bp;
int i;
if (dir == T4_MEMORY_READ) {
- last.word = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ last.word = le32_to_cpu(
+ (__force __le32)t4_read_reg(adap,
+ mem_base + offset));
for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
bp[i] = last.byte[i];
} else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
for (i = resid; i < 4; i++)
last.byte[i] = 0;
t4_write_reg(adap, mem_base + offset,
- (__force u32) last.word);
+ (__force u32)cpu_to_le32(last.word));
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index a368c0a96ec7..204bd182473b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
}
if (ENIC_TEST_INTR(pba, notify_intr)) {
- vnic_intr_return_all_credits(&enic->intr[notify_intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[notify_intr]);
}
if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
struct enic *enic = data;
unsigned int intr = enic_msix_notify_intr(enic);
- vnic_intr_return_all_credits(&enic->intr[intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[intr]);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df78882e48..178e54028d10 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
struct phy_device *phydev = priv->phydev;
if (unlikely(phydev->link != priv->oldlink ||
- phydev->duplex != priv->oldduplex ||
- phydev->speed != priv->oldspeed))
+ (phydev->link && (phydev->duplex != priv->oldduplex ||
+ phydev->speed != priv->oldspeed))))
gfar_update_link_state(priv);
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb7a962..c05e50759621 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
device_remove_file(&dev->dev, &dev_attr_remove_port);
}
+static int ehea_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ pr_info("Reboot: freeing all eHEA resources\n");
+ ibmebus_unregister_driver(&ehea_driver);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+ .notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret = NOTIFY_BAD;
+ struct memory_notify *arg = data;
+
+ mutex_lock(&dlpar_mem_lock);
+
+ switch (action) {
+ case MEM_CANCEL_OFFLINE:
+ pr_info("memory offlining canceled");
+ /* Fall through: re-add canceled memory block */
+
+ case MEM_ONLINE:
+ pr_info("memory is going online");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ case MEM_GOING_OFFLINE:
+ pr_info("memory is going offline");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+ goto out_unlock;
+ ehea_rereg_mrs();
+ break;
+
+ default:
+ break;
+ }
+
+ ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
+
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+ return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+ .notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+ int i;
+
+ if (ehea_fw_handles.arr)
+ for (i = 0; i < ehea_fw_handles.num_entries; i++)
+ ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+ ehea_fw_handles.arr[i].fwh,
+ FORCE_FREE);
+
+ if (ehea_bcmc_regs.arr)
+ for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+ ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+ ehea_bcmc_regs.arr[i].port_id,
+ ehea_bcmc_regs.arr[i].reg_type,
+ ehea_bcmc_regs.arr[i].macaddr,
+ 0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+ int ret = 0;
+
+ if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+ return 0;
+
+ ret = ehea_create_busmap();
+ if (ret) {
+ pr_info("ehea_create_busmap failed\n");
+ goto out;
+ }
+
+ ret = register_reboot_notifier(&ehea_reboot_nb);
+ if (ret) {
+ pr_info("register_reboot_notifier failed\n");
+ goto out;
+ }
+
+ ret = register_memory_notifier(&ehea_mem_nb);
+ if (ret) {
+ pr_info("register_memory_notifier failed\n");
+ goto out2;
+ }
+
+ ret = crash_shutdown_register(ehea_crash_handler);
+ if (ret) {
+ pr_info("crash_shutdown_register failed\n");
+ goto out3;
+ }
+
+ return 0;
+
+out3:
+ unregister_memory_notifier(&ehea_mem_nb);
+out2:
+ unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+ return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+ if (atomic_read(&ehea_memory_hooks_registered))
+ return;
+
+ unregister_reboot_notifier(&ehea_reboot_nb);
+ if (crash_shutdown_unregister(ehea_crash_handler))
+ pr_info("failed unregistering crash handler\n");
+ unregister_memory_notifier(&ehea_mem_nb);
+}
+
static int ehea_probe_adapter(struct platform_device *dev)
{
struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
int ret;
int i;
+ ret = ehea_register_memory_hooks();
+ if (ret)
+ return ret;
+
if (!dev || !dev->dev.of_node) {
pr_err("Invalid ibmebus device probed\n");
return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
return 0;
}
-static void ehea_crash_handler(void)
-{
- int i;
-
- if (ehea_fw_handles.arr)
- for (i = 0; i < ehea_fw_handles.num_entries; i++)
- ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
- ehea_fw_handles.arr[i].fwh,
- FORCE_FREE);
-
- if (ehea_bcmc_regs.arr)
- for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
- ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
- ehea_bcmc_regs.arr[i].port_id,
- ehea_bcmc_regs.arr[i].reg_type,
- ehea_bcmc_regs.arr[i].macaddr,
- 0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- int ret = NOTIFY_BAD;
- struct memory_notify *arg = data;
-
- mutex_lock(&dlpar_mem_lock);
-
- switch (action) {
- case MEM_CANCEL_OFFLINE:
- pr_info("memory offlining canceled");
- /* Readd canceled memory block */
- case MEM_ONLINE:
- pr_info("memory is going online");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- case MEM_GOING_OFFLINE:
- pr_info("memory is going offline");
- set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
- if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- goto out_unlock;
- ehea_rereg_mrs();
- break;
- default:
- break;
- }
-
- ehea_update_firmware_handles();
- ret = NOTIFY_OK;
-
-out_unlock:
- mutex_unlock(&dlpar_mem_lock);
- return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
- .notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
- unsigned long action, void *unused)
-{
- if (action == SYS_RESTART) {
- pr_info("Reboot: freeing all eHEA resources\n");
- ibmebus_unregister_driver(&ehea_driver);
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
- .notifier_call = ehea_reboot_notifier,
-};
-
static int check_module_parm(void)
{
int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
if (ret)
goto out;
- ret = ehea_create_busmap();
- if (ret)
- goto out;
-
- ret = register_reboot_notifier(&ehea_reboot_nb);
- if (ret)
- pr_info("failed registering reboot notifier\n");
-
- ret = register_memory_notifier(&ehea_mem_nb);
- if (ret)
- pr_info("failed registering memory remove notifier\n");
-
- ret = crash_shutdown_register(ehea_crash_handler);
- if (ret)
- pr_info("failed registering crash handler\n");
-
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
pr_err("failed registering eHEA device driver on ebus\n");
- goto out2;
+ goto out;
}
ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
if (ret) {
pr_err("failed to register capabilities attribute, ret=%d\n",
ret);
- goto out3;
+ goto out2;
}
return ret;
-out3:
- ibmebus_unregister_driver(&ehea_driver);
out2:
- unregister_memory_notifier(&ehea_mem_nb);
- unregister_reboot_notifier(&ehea_reboot_nb);
- crash_shutdown_unregister(ehea_crash_handler);
+ ibmebus_unregister_driver(&ehea_driver);
out:
return ret;
}
static void __exit ehea_module_exit(void)
{
- int ret;
-
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
- unregister_reboot_notifier(&ehea_reboot_nb);
- ret = crash_shutdown_unregister(ehea_crash_handler);
- if (ret)
- pr_info("failed unregistering crash handler\n");
- unregister_memory_notifier(&ehea_mem_nb);
+ ehea_unregister_memory_hooks();
kfree(ehea_fw_handles.arr);
kfree(ehea_bcmc_regs.arr);
ehea_destroy_busmap();
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc019e7..072426a72745 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
return ret;
}
+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ u64 mac_address;
+ int rc;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+ if (rc) {
+ netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+ return rc;
+ }
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_open = ibmveth_open,
.ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_fix_features = ibmveth_fix_features,
.ndo_set_features = ibmveth_set_features,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ibmveth_poll_controller,
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index cb19c377e0cc..1da7d05abd38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -875,8 +875,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
- >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2828,7 +2829,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && filter_index)
*filter_index = resp->index;
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index b0665509eae6..2f583554a260 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
I40E_PRTDCB_GENC_PFCLDA_SHIFT);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 30cf0be7d1b2..e802b6bc067d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -990,8 +990,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!cmd_buf)
return count;
bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
- if (bytes_not_copied < 0)
+ if (bytes_not_copied < 0) {
+ kfree(cmd_buf);
return bytes_not_copied;
+ }
if (bytes_not_copied > 0)
count -= bytes_not_copied;
cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index c3858e7f0e66..56bdaff9f27e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1507,7 +1507,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = vsi->alloc_queue_pairs/numtc;
+ /* In MFP case we can have a much lower count of MSIx
+ * vectors available and so we need to lower the used
+ * q count.
+ */
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2690,8 +2695,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
- return;
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = 0;
+ tx_ring->dcb_tc = 0;
+ }
+ }
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3836,6 +3848,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -5246,8 +5264,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Wait for the PF's Tx queues to be disabled */
ret = i40e_pf_wait_txq_disabled(pf);
- if (!ret)
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ } else {
i40e_pf_unquiesce_all_vsi(pf);
+ }
+
exit:
return ret;
}
@@ -5579,7 +5603,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
int i, v;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
/* for each VSI/netdev
@@ -9849,6 +9874,7 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -9875,12 +9901,6 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
- i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
-
/* shutdown and destroy the HMC */
if (pf->hw.hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -10034,6 +10054,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_clear_interrupt_scheme(pf);
+
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 28429c8fbc98..039018abad4a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -725,9 +725,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -771,6 +773,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
*errno = -ESRCH;
break;
}
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ i40e_status old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index af350626843e..d4b4aa7c204e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -587,6 +587,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -595,10 +609,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -607,6 +627,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;
@@ -624,41 +646,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending < I40E_MIN_DESC_PENDING) &&
- (tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
tx_pending, tx_ring->queue_index);
pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -2356,6 +2362,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
+/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
@@ -2612,6 +2679,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 38449b230d60..4b0b8102cdc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
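A quick aside on the i40e_get_tx_pending() rework in the hunks above: instead of comparing next_to_clean and next_to_use, the driver now derives the number of outstanding descriptors from the head write-back value and the tail register, handling the wrap-around case explicitly. The following is only a minimal user-space sketch of that arithmetic; the ring size and sample values are illustrative and not taken from the driver.

#include <stdio.h>

/* Mirrors the wrap-around arithmetic of the reworked i40e_get_tx_pending():
 * descriptors sitting between head (last completed) and tail (next to use). */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
                               unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 14, 512));  /* 4 descriptors outstanding */
	printf("%u\n", tx_pending(510, 2, 512));  /* 4 again, across the wrap  */
	printf("%u\n", tx_pending(7, 7, 512));    /* 0: ring fully cleaned     */
	return 0;
}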
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index d2ff862f0726..fe13ad2def46 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -127,6 +127,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -135,10 +149,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -147,6 +167,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
bool ret = false;
@@ -163,36 +185,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
- !(tx_pending < I40E_MIN_DESC_PENDING) ||
- !(tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -1405,17 +1411,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1473,13 +1478,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1489,6 +1490,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
+
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -1579,6 +1585,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
@@ -1853,6 +1920,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index ffdda716813e..1e49bb1fbac1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -120,6 +120,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
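For orientation on the i40e/i40evf linearization hunks above: the hardware can chain at most 8 data buffers per packet on the wire, so a TSO skb whose fragments would exceed that per segment has to be linearized before mapping. Below is a user-space sketch of the coarse first-pass check only (the driver follows it with a finer per-segment size walk); the function and constant names are illustrative.

#include <stdio.h>

#define MAX_BUFFER_TXD 8   /* mirrors I40E_MAX_BUFFER_TXD from the hunks above */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Coarse pass mirroring i40e_chk_linearize(): if the fragments, spread
 * evenly over the GSO segments (plus roughly one buffer each), would
 * exceed the HW limit, the skb needs linearizing. */
static int needs_linearize(unsigned int num_frags, unsigned int gso_segs)
{
	if (num_frags < MAX_BUFFER_TXD)
		return 0;
	return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) > MAX_BUFFER_TXD;
}

int main(void)
{
	printf("%d\n", needs_linearize(17, 2));  /* 1: too many frags per segment */
	printf("%d\n", needs_linearize(10, 4));  /* 0: passes the coarse check    */
	return 0;
}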
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 4e789479f00f..b66e03d9711f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
-
+ bool gro_enabled;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ gro_enabled = priv->dev->features & NETIF_F_GRO;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+ priv->dev->features &= ~NETIF_F_GRO;
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+
+ if (gro_enabled)
+ priv->dev->features |= NETIF_F_GRO;
+
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553bd905..eda29dbbfcd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d26cd4a..d97ca88c55b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
- int port;
+ int port, err = 0;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
} else {
struct mlx4_update_qp_params params = {.flags = 0};
- mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ if (err)
+ goto out;
}
}
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
}
- return 0;
+out:
+ return err;
}
static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d25547..57a6e6cd74fc 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
if (mac->phydev)
phy_start(mac->phydev);
- init_timer(&mac->tx->clean_timer);
- mac->tx->clean_timer.function = pasemi_mac_tx_timer;
- mac->tx->clean_timer.data = (unsigned long)mac->tx;
- mac->tx->clean_timer.expires = jiffies+HZ;
- add_timer(&mac->tx->clean_timer);
+ setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+ (unsigned long)mac->tx);
+ mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae94692..0a5e204a0179 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
} __attribute__ ((aligned(64)));
-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
struct rcv_desc {
__le16 reference_handle;
__le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
#define NETXEN_IMAGE_START 0x43000 /* compressed image */
#define NETXEN_SECONDARY_START 0x200000 /* backup images */
#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
-#define NETXEN_USER_START 0x3E8000 /* Firmare info */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa4317611fd6..f221126a5c4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
#define QLCNIC_BRDCFG_START 0x4000 /* board config */
#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
-#define QLCNIC_USER_START 0x3E8000 /* Firmare info */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020af2193..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
int rc = -EINVAL;
if (!rtl_fw_format_ok(tp, rtl_fw)) {
- netif_err(tp, ifup, dev, "invalid firwmare\n");
+ netif_err(tp, ifup, dev, "invalid firmware\n");
goto out;
}
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
RTL_W8(ChipCmd, CmdReset);
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
- netdev_reset_queue(tp->dev);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 status, len;
u32 opts[2];
int frags;
- bool stop_queue;
if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+ RTL_W8(TxPoll, NPQ);
- if (!skb->xmit_more || stop_queue ||
- netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
- RTL_W8(TxPoll, NPQ);
-
- mmiowb();
- }
+ mmiowb();
- if (stop_queue) {
+ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
unsigned int dirty_tx, tx_left;
- unsigned int bytes_compl = 0, pkts_compl = 0;
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- pkts_compl++;
- bytes_compl += tx_skb->skb->len;
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
}
if (tp->dirty_tx != dirty_tx) {
- netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets += pkts_compl;
- tp->tx_stats.bytes += bytes_compl;
- u64_stats_update_end(&tp->tx_stats.syncp);
-
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd263997..736d5d1624a1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
.tpauser = 1,
.hw_swap = 1,
.rmiimode = 1,
- .shift_rd0 = 1,
};
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
msleep(2); /* max frame time at 10 Mbps < 1250 us */
sh_eth_get_stats(ndev);
sh_eth_reset(ndev);
+
+ /* Set MAC address again */
+ update_mac_address(ndev);
}
/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
+ /* TACT bit must be checked before all the following reads */
+ rmb();
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
limit = boguscnt;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+ /* RACT bit must be checked before all the following reads */
+ rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* In case of almost all GETHER/ETHERs, the Receive Frame State
* (RFS) bits in the Receive Descriptor 0 are from bit 9 to
- * bit 0. However, in case of the R8A7740, R8A779x, and
- * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+ * bit 0. However, in case of the R8A7740 and R7S72100
+ * the RFS bits are from bit 25 to bit 16. So, the
* driver needs right shifting by 16.
*/
if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
skb_checksum_none_assert(skb);
rxdesc->addr = dma_addr;
}
+ wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
/* fix the values for the next receiving if RDE is set */
- if (intr_status & EESR_RDE) {
+ if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
u32 count = (sh_eth_read(ndev, RDFAR) -
sh_eth_read(ndev, RDLAR)) >> 4;
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- if (skb_padto(skb, ETH_ZLEN))
+ if (skb_put_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
txdesc->buffer_length = skb->len;
+ wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index e5a15a4c4e8f..a5d1e6ea7d58 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1280,9 +1280,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
if (enable)
- val |= 1 << rocker_port->pport;
+ val |= 1ULL << rocker_port->pport;
else
- val &= ~(1 << rocker_port->pport);
+ val &= ~(1ULL << rocker_port->pport);
rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
@@ -4241,6 +4241,8 @@ static int rocker_probe_ports(struct rocker *rocker)
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
err = rocker_probe_port(rocker, i);
if (err)
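A note on the rocker_port_set_enable() hunk above: PORT_PHYS_ENABLE is a 64-bit register, so building the per-port bit with a plain int literal would misbehave for port numbers of 32 and above; the 1ULL form keeps the shift in 64-bit arithmetic. A small standalone illustration (the port number here is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int pport = 40;   /* illustrative port number above 31 */
	uint64_t enable = 0;

	/* The 64-bit constant keeps the shift in range; a plain '1 << pport'
	 * would shift a 32-bit int past its width, which is undefined
	 * behaviour in C and loses the intended bit. */
	enable |= 1ULL << pport;
	printf("port %u -> mask 0x%016llx\n", pport, (unsigned long long)enable);

	enable &= ~(1ULL << pport);
	printf("cleared -> 0x%016llx\n", (unsigned long long)enable);

	return 0;
}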
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127ab352..3449893aea8d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
smc->packets_waiting = 0;
smc_reset(dev);
- init_timer(&smc->media);
- smc->media.function = media_check;
- smc->media.data = (u_long) dev;
- smc->media.expires = jiffies + HZ;
- add_timer(&smc->media);
+ setup_timer(&smc->media, media_check, (u_long)dev);
+ mod_timer(&smc->media, jiffies + HZ);
return 0;
} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f95fe09..209ee1b27f8d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,10 @@ static const char version[] =
#include "smc91x.h"
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/neponset.h>
+#endif
+
#ifndef SMC_NOWAIT
# define SMC_NOWAIT 0
#endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = smc_request_attrib(pdev, ndev);
if (ret)
goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
- neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+ if (machine_is_assabet() && machine_has_neponset())
+ neponset_ncr_set(NCR_ENET_OSC_EN);
#endif
platform_set_drvdata(pdev, ndev);
ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
* Define your architecture specific bus configuration parameters here.
*/
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
- defined(CONFIG_MACH_MAINSTONE) ||\
- defined(CONFIG_MACH_ZYLONITE) ||\
- defined(CONFIG_MACH_LITTLETON) ||\
- defined(CONFIG_MACH_ZYLONITE2) ||\
- defined(CONFIG_ARCH_VIPER) ||\
- defined(CONFIG_MACH_STARGATE2) ||\
- defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
#include <asm/mach-types.h>
@@ -74,95 +67,8 @@
/* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
- if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
- unsigned int v = val << 16;
- v |= readl(ioaddr + (reg & ~2)) & 0xffff;
- writel(v, ioaddr + (reg & ~2));
- } else {
- writew(val, ioaddr + reg);
- }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 0
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT 2
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
- defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#elif defined(CONFIG_ARCH_INNOKOM) || \
- defined(CONFIG_ARCH_PXA_IDP) || \
- defined(CONFIG_ARCH_RAMSES) || \
- defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
- if (reg & 2) {
+ if ((machine_is_mainstone() || machine_is_stargate2() ||
+ machine_is_pxa_idp()) && reg & 2) {
unsigned int v = val << 16;
v |= readl(ioaddr + (reg & ~2)) & 0xffff;
writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
-
#elif defined(CONFIG_COLDFIRE)
#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3838f1..a0ea84fe6519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- init_timer(&priv->eee_ctrl_timer);
- priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
- priv->eee_ctrl_timer.data = (unsigned long)priv;
- priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
- add_timer(&priv->eee_ctrl_timer);
+ setup_timer(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer,
+ (unsigned long)priv);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(eee_timer));
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
*flow_type = IP_USER_FLOW;
break;
default:
- return 0;
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
TCAM_V4KEY0_CLASS_CODE_SHIFT;
ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
if (ret < 0) {
netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
parent->index);
- ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 3bc992cd70b7..f6a71092e135 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -50,7 +50,7 @@ config TI_DAVINCI_CPDMA
will be called davinci_cpdma. This is recommended.
config TI_CPSW_PHY_SEL
- boolean "TI CPSW Switch Phy sel Support"
+ bool "TI CPSW Switch Phy sel Support"
depends on TI_CPSW
---help---
This driver supports configuring of the phy mode connected to
@@ -77,7 +77,7 @@ config TI_CPSW
will be called cpsw.
config TI_CPTS
- boolean "TI Common Platform Time Sync (CPTS) Support"
+ bool "TI Common Platform Time Sync (CPTS) Support"
depends on TI_CPSW
select PTP_1588_CLOCK
---help---
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..a1bbaf6352ba 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, slave->port_vlan);
+ priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
}
return 0;
}
+#endif
-static const struct dev_pm_ops cpsw_pm_ops = {
- .suspend = cpsw_suspend,
- .resume = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
static const struct of_device_id cpsw_of_mtable[] = {
{ .compatible = "ti,cpsw", },
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b44b97e..c00084d689f3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int davinci_mdio_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops davinci_mdio_pm_ops = {
- .suspend_late = davinci_mdio_suspend,
- .resume_early = davinci_mdio_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 44ff8d7c64a5..5138407941cf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
int i;
static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
- if (dev->flags & IFF_ALLMULTI) {
+ if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1e51c6bf3ae1..8362aef0c15e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
} /* else everything is zero */
}
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
struct iov_iter *from, int noblock)
{
- int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+ int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
struct sk_buff *skb;
struct macvlan_dev *vlan;
unsigned long total_len = iov_iter_count(from);
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
}
- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+ skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
linear, noblock, &err);
if (!skb)
goto err;
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54c9010..32efbd48f326 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
#define XGBE_PHY_SPEEDS 3
#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_10000_BLWC 0
#define SPEED_10000_CDR 0x7
#define SPEED_10000_PLL 0x1
-#define SPEED_10000_PQ 0x1e
+#define SPEED_10000_PQ 0x12
#define SPEED_10000_RATE 0x0
#define SPEED_10000_TXAMP 0xa
#define SPEED_10000_WORD 0x7
+#define SPEED_10000_DFE_TAP_CONFIG 0x1
+#define SPEED_10000_DFE_TAP_ENABLE 0x7f
#define SPEED_2500_BLWC 1
#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_2500_RATE 0x1
#define SPEED_2500_TXAMP 0xf
#define SPEED_2500_WORD 0x1
+#define SPEED_2500_DFE_TAP_CONFIG 0x3
+#define SPEED_2500_DFE_TAP_ENABLE 0x0
#define SPEED_1000_BLWC 1
#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_1000_RATE 0x3
#define SPEED_1000_TXAMP 0xf
#define SPEED_1000_WORD 0x1
+#define SPEED_1000_DFE_TAP_CONFIG 0x3
+#define SPEED_1000_DFE_TAP_ENABLE 0x0
/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
#define RXTX_REG20_BLWC_ENA_INDEX 2
#define RXTX_REG20_BLWC_ENA_WIDTH 1
#define RXTX_REG114_PQ_REG_INDEX 9
#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
SPEED_10000_TXAMP,
};
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+ SPEED_1000_DFE_TAP_CONFIG,
+ SPEED_2500_DFE_TAP_CONFIG,
+ SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+ SPEED_1000_DFE_TAP_ENABLE,
+ SPEED_2500_DFE_TAP_ENABLE,
+ SPEED_10000_DFE_TAP_ENABLE,
+};
+
enum amd_xgbe_phy_an {
AMD_XGBE_AN_READY = 0,
AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
/* Auto-negotiation state machine support */
struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
status = XSIR0_IOREAD(priv, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- return;
+ goto rx_reset;
}
netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
}
static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
sizeof(priv->serdes_tx_amp));
}
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_CFG_PROPERTY,
+ priv->serdes_dfe_tap_cfg,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_CFG_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_cfg,
+ amd_xgbe_phy_serdes_dfe_tap_cfg,
+ sizeof(priv->serdes_dfe_tap_cfg));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_ENA_PROPERTY,
+ priv->serdes_dfe_tap_ena,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_ENA_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_ena,
+ amd_xgbe_phy_serdes_dfe_tap_ena,
+ sizeof(priv->serdes_dfe_tap_ena));
+ }
+
phydev->priv = priv;
if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index cdcac6aa4260..52cd8db2c57d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
}
/**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ * speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+ unsigned int idx;
+
+ idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+ return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+ (settings[idx].setting & features);
+}
+
+/**
* phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
* @phydev: the target phy_device struct
*
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
int status;
- unsigned int idx;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
- idx = phy_find_setting(phydev->speed, phydev->duplex);
- if (!(lp & adv & settings[idx].setting))
+ if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
goto eee_exit_err;
if (clk_stop_enable) {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a7d163bf5bbb..9d3366f7c9ad 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
- struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
- return team_port_exists(dev) ? port : NULL;
+ return rcu_dereference(dev->rx_handler_data);
}
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 37eed4d84e9c..7ba8d0885f12 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
* Linksys USB200M
* Netgear FA120
* Sitecom LN-029
+ * Sitecom LN-028
* Intellinet USB 2.0 Ethernet
* ST Lab USB 2.0 Ethernet
* TrendNet TU2-ET100
@@ -397,14 +398,14 @@ config USB_NET_CDC_SUBSET
not generally have permanently assigned Ethernet addresses.
config USB_ALI_M5632
- boolean "ALi M5632 based 'USB 2.0 Data Link' cables"
+ bool "ALi M5632 based 'USB 2.0 Data Link' cables"
depends on USB_NET_CDC_SUBSET
help
Choose this option if you're using a host-to-host cable
based on this design, which supports USB 2.0 high speed.
config USB_AN2720
- boolean "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
+ bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
depends on USB_NET_CDC_SUBSET
help
Choose this option if you're using a host-to-host cable
@@ -412,7 +413,7 @@ config USB_AN2720
Cypress brand.
config USB_BELKIN
- boolean "eTEK based host-to-host cables (Advance, Belkin, ...)"
+ bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
depends on USB_NET_CDC_SUBSET
default y
help
@@ -421,7 +422,7 @@ config USB_BELKIN
microcontroller, with LEDs that indicate traffic.
config USB_ARMLINUX
- boolean "Embedded ARM Linux links (iPaq, ...)"
+ bool "Embedded ARM Linux links (iPaq, ...)"
depends on USB_NET_CDC_SUBSET
default y
help
@@ -438,14 +439,14 @@ config USB_ARMLINUX
this simpler protocol by installing a different kernel.
config USB_EPSON2888
- boolean "Epson 2888 based firmware (DEVELOPMENT)"
+ bool "Epson 2888 based firmware (DEVELOPMENT)"
depends on USB_NET_CDC_SUBSET
help
Choose this option to support the usb networking links used
by some sample firmware from Epson.
config USB_KC2190
- boolean "KT Technology KC2190 based cables (InstaNet)"
+ bool "KT Technology KC2190 based cables (InstaNet)"
depends on USB_NET_CDC_SUBSET
help
Choose this option if you're using a host-to-host cable
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792062a2..1173a24feda3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x0df6, 0x0056),
.driver_info = (unsigned long) &ax88178_info,
}, {
+ // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+ USB_DEVICE (0x0df6, 0x061c),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// corega FEther USB2-TX
USB_DEVICE (0x07aa, 0x0017),
.driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 3c8dfe5e46ed..111d907e0c11 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1597,7 +1597,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
}
cprev = cnow;
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tiocmget->waitq, &wait);
return ret;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 3d18bb0eee85..1bfe0fcaccf5 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
}, {
USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
.driver_info = (unsigned long) &prolific_info,
+}, {
+ USB_DEVICE(0x3923, 0x7825), /* National Instruments USB
+ * Host-to-Host Cable
+ */
+ .driver_info = (unsigned long) &prolific_info,
},
{ }, // END
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 110a2cf67244..f1ff3666f090 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1710,6 +1710,12 @@ static int virtnet_probe(struct virtio_device *vdev)
struct virtnet_info *vi;
u16 max_queue_pairs;
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
if (!virtnet_validate_features(vdev))
return -EINVAL;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 83c39e2858bf..88d121d43c08 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->rxwaitq, &wait);
while (!chan->rx_status) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
spin_lock_irqsave(&cosa->lock, flags);
if (signal_pending(current) && chan->rx_status == 0) {
chan->rx_status = 1;
remove_wait_queue(&chan->rxwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
spin_unlock_irqrestore(&cosa->lock, flags);
mutex_unlock(&chan->rlock);
return -ERESTARTSYS;
}
}
remove_wait_queue(&chan->rxwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
kbuf = chan->rxdata;
count = chan->rxsize;
spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->txwaitq, &wait);
while (!chan->tx_status) {
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
spin_lock_irqsave(&cosa->lock, flags);
if (signal_pending(current) && chan->tx_status == 0) {
chan->tx_status = 1;
remove_wait_queue(&chan->txwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
chan->tx_status = 1;
spin_unlock_irqrestore(&cosa->lock, flags);
up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
}
}
remove_wait_queue(&chan->txwaitq, &wait);
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
up(&chan->wsem);
spin_unlock_irqrestore(&cosa->lock, flags);
kfree(kbuf);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c1947c5915eb..d56b7859a437 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
genlmsg_end(skb, msg_head);
- genlmsg_unicast(&init_net, skb, dst_portid);
+ if (genlmsg_unicast(&init_net, skb, dst_portid))
+ goto err_free_txskb;
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
return;
nla_put_failure:
+ nlmsg_free(skb);
+err_free_txskb:
printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
ieee80211_free_txskb(hw, my_skb);
data->tx_failed++;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 006b8bcb2e31..2b4ef256c6b9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -243,14 +243,14 @@ config RT2X00_LIB
select AVERAGE
config RT2X00_LIB_FIRMWARE
- boolean
+ bool
select FW_LOADER
config RT2X00_LIB_CRYPTO
- boolean
+ bool
config RT2X00_LIB_LEDS
- boolean
+ bool
default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
config RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..c4d68d768408 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned long flags;
do {
+ int notify;
+
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
spin_unlock_irqrestore(&queue->response_lock, flags);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+
if (cons == end)
break;
txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
+ int notify;
unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx];
+
spin_lock_irqsave(&queue->response_lock, flags);
+
make_tx_response(queue, &pending_tx_info->req, status);
- index = pending_index(queue->pending_prod);
+
+ /* Release the pending index before pushing the Tx response so
+ * it's available before a new Tx request is pushed by the
+ * frontend.
+ */
+ index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx;
- /* TX shouldn't use the index before we give it back here */
- mb();
- queue->pending_prod++;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
spin_unlock_irqrestore(&queue->response_lock, flags);
+
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
}
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
{
RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
- int notify;
resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
- if (notify)
- notify_remote_via_irq(queue->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,