163 files changed, 3147 insertions, 2131 deletions
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c index bce8bac311c9..cca2afc945af 100644 --- a/drivers/net/caif/caif_shmcore.c +++ b/drivers/net/caif/caif_shmcore.c @@ -338,11 +338,8 @@ static void shm_rx_work_func(struct work_struct *rx_work) /* Get a suitable CAIF packet and copy in data. */ skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev, frm_pck_len + 1); - - if (skb == NULL) { - pr_info("OOM: Try next frame in descriptor\n"); + if (skb == NULL) break; - } p = skb_put(skb, frm_pck_len); memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len); diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index a175d0be1ae1..ee705771bd2c 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev) /* allocate a new skb for next time receive */ new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); - if (!new_skb) { - pr_notice("init: low on mem - packet dropped\n"); + if (!new_skb) goto init_error; - } + skb_reserve(new_skb, NET_IP_ALIGN); /* Invidate the data cache of skb->data range when it is write back * cache. It will prevent overwritting the new data from DMA @@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev) new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); if (!new_skb) { - netdev_notice(dev, "rx: low on mem - packet dropped\n"); dev->stats.rx_dropped++; goto out; } diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index 6e722dc37db7..65926a956575 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); if (!skb) { - printk ("%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 3789affbc0e5..0866e7627433 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); if (!skb) { - netdev_warn(dev, "Memory squeeze, deferring packet\n"); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 60e2b701afe7..9793767996a2 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv) dev->stats.rx_packets++; } else { am_writeword (dev, hdraddr + 2, RMD_OWN); - printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; break; } diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index 98f4522fd17b..c178eb4c8166 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev) skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - netdev_warn(dev, "Memory squeeze, deferring packet\n"); for (i = 0; i < RX_RING_SIZE; i++) if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN) break; diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 84219df72f51..ab9bedb8d276 100644 --- 
a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev ) else { skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", - dev->name )); for( i = 0; i < RX_RING_SIZE; i++ ) if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag & RMD1_OWN_CHIP) diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index de774d419144..688aede742c7 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev) frmlen -= 4; /* Remove FCS */ skb = netdev_alloc_skb(dev, frmlen + 2); if (skb == NULL) { - netdev_err(dev, "Memory squeeze, dropping packet.\n"); dev->stats.rx_dropped++; continue; } diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index baca0bd1b393..3d86ffeb4e15 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev) skb = netdev_alloc_skb(dev, len + 2); if (skb == 0) { - printk("%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; *rds_ptr(rd, mblength, lp->type) = 0; *rds_ptr(rd, rmd1, lp->type) = diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 797f847edf13..ed2130727643 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev, skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN); if (skb == NULL) { - netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; return; } diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 74b3891b6483..de412d331a72 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev ) else { skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", - dev->name )); - dev->stats.rx_dropped++; head->msg_length = 0; head->flag |= RMD1_OWN_CHIP; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 6a40290d3727..70d543063993 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev) skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { - printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; @@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev) skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { - printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 92f4734f860d..e1f1b2a0673a 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & RRS_PKT_SIZE_MASK) - 4; /* CRC */ skb = 
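The hunks above (and the similar ones that follow) all delete per-driver "memory squeeze" messages: a failed skb allocation already triggers the allocator's own out-of-memory warning, so the drivers only need to account for the drop. A minimal sketch of the receive-path pattern they converge on; the function and its arguments are illustrative, not taken from any one driver:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void example_rx_one(struct net_device *dev, const u8 *data, int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;	/* no private OOM printk needed */
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);	/* copy the received frame */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}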
netdev_alloc_skb_ip_align(netdev, packet_size); - if (skb == NULL) { - netdev_warn(netdev, - "Memory squeeze, deferring packet\n"); + if (skb == NULL) goto skip_pkt; - } + memcpy(skb->data, (u8 *)(prrs + 1), packet_size); skb_put(skb, packet_size); skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 1278b47022e0..a046b6ff847c 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter) /* alloc new buffer */ skb = netdev_alloc_skb_ip_align(netdev, rx_size); if (NULL == skb) { - printk(KERN_WARNING - "%s: Mem squeeze, deferring packet.\n", - netdev->name); /* * Check that some rx space is free. If not, * free one and mark stats->rx_dropped++. diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 7d81e059e811..db343a1d0835 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1619,7 +1619,6 @@ static int bcm_enet_probe(struct platform_device *pdev) struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; struct mii_bus *bus; const char *clk_name; - unsigned int iomem_size; int i, ret; /* stop if shared driver failed, assume driver->probe will be @@ -1644,17 +1643,12 @@ static int bcm_enet_probe(struct platform_device *pdev) if (ret) goto out; - iomem_size = resource_size(res_mem); - if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { - ret = -EBUSY; - goto out; - } - - priv->base = ioremap(res_mem->start, iomem_size); + priv->base = devm_request_and_ioremap(&pdev->dev, res_mem); if (priv->base == NULL) { ret = -ENOMEM; - goto out_release_mem; + goto out; } + dev->irq = priv->irq = res_irq->start; priv->irq_rx = res_irq_rx->start; priv->irq_tx = res_irq_tx->start; @@ -1674,9 +1668,9 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->mac_clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); - goto out_unmap; + goto out; } - clk_enable(priv->mac_clk); + clk_prepare_enable(priv->mac_clk); /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; @@ -1705,7 +1699,7 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->phy_clk = NULL; goto out_put_clk_mac; } - clk_enable(priv->phy_clk); + clk_prepare_enable(priv->phy_clk); } /* do minimal hardware init to be able to probe mii bus */ @@ -1733,7 +1727,8 @@ static int bcm_enet_probe(struct platform_device *pdev) * if a slave is not present on hw */ bus->phy_mask = ~(1 << priv->phy_id); - bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, + GFP_KERNEL); if (!bus->irq) { ret = -ENOMEM; goto out_free_mdio; @@ -1794,10 +1789,8 @@ static int bcm_enet_probe(struct platform_device *pdev) return 0; out_unregister_mdio: - if (priv->mii_bus) { + if (priv->mii_bus) mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); - } out_free_mdio: if (priv->mii_bus) @@ -1807,19 +1800,13 @@ out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); if (priv->phy_clk) { - clk_disable(priv->phy_clk); + clk_disable_unprepare(priv->phy_clk); clk_put(priv->phy_clk); } out_put_clk_mac: - clk_disable(priv->mac_clk); + clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); - -out_unmap: - iounmap(priv->base); - -out_release_mem: 
- release_mem_region(res_mem->start, iomem_size); out: free_netdev(dev); return ret; @@ -1833,7 +1820,6 @@ static int bcm_enet_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; - struct resource *res; /* stop netdevice */ dev = platform_get_drvdata(pdev); @@ -1845,7 +1831,6 @@ static int bcm_enet_remove(struct platform_device *pdev) if (priv->has_phy) { mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } else { struct bcm63xx_enet_platform_data *pd; @@ -1856,17 +1841,12 @@ static int bcm_enet_remove(struct platform_device *pdev) bcm_enet_mdio_write_mii); } - /* release device resources */ - iounmap(priv->base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - /* disable hw block clocks */ if (priv->phy_clk) { - clk_disable(priv->phy_clk); + clk_disable_unprepare(priv->phy_clk); clk_put(priv->phy_clk); } - clk_disable(priv->mac_clk); + clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); platform_set_drvdata(pdev, NULL); @@ -1889,31 +1869,20 @@ struct platform_driver bcm63xx_enet_driver = { static int bcm_enet_shared_probe(struct platform_device *pdev) { struct resource *res; - unsigned int iomem_size; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; - iomem_size = resource_size(res); - if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) - return -EBUSY; - - bcm_enet_shared_base = ioremap(res->start, iomem_size); - if (!bcm_enet_shared_base) { - release_mem_region(res->start, iomem_size); + bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res); + if (!bcm_enet_shared_base) return -ENOMEM; - } + return 0; } static int bcm_enet_shared_remove(struct platform_device *pdev) { - struct resource *res; - - iounmap(bcm_enet_shared_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); return 0; } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index da5f4397f87c..eec0af45b859 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/mii.h> +#include <linux/phy.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <bcm47xx_nvram.h> @@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, /* Alloc skb */ slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); - if (!slot->skb) { - bgmac_err(bgmac, "Allocation of skb failed!\n"); + if (!slot->skb) return -ENOMEM; - } /* Poison - if everything goes fine, hardware will overwrite it */ rx = (struct bgmac_rx_header *)slot->skb->data; @@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = { }; /************************************************** + * MII + **************************************************/ + +static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return bgmac_phy_read(bus->priv, mii_id, regnum); +} + +static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + return bgmac_phy_write(bus->priv, mii_id, regnum, value); +} + +static int bgmac_mii_register(struct bgmac *bgmac) +{ + struct mii_bus *mii_bus; + int i, err = 0; + + mii_bus = mdiobus_alloc(); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "bgmac mii bus"; + sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num, 
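The bcm63xx_enet hunks above trade open-coded request_mem_region()/ioremap()/release_mem_region() bookkeeping for a device-managed mapping, and move from clk_enable() to clk_prepare_enable() as the common clock framework requires. A hedged sketch of that probe shape, assuming the 3.9-era devm_request_and_ioremap() helper (later kernels spell it devm_ioremap_resource()); the clock name and function names are illustrative:

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -ENOMEM;	/* region and mapping are released automatically on detach */

	clk = clk_get(&pdev->dev, "enet");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* prepare + enable in one call; clk_disable_unprepare() undoes it */
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... set up descriptors, register the netdev, etc. ... */
	return 0;
}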
+ bgmac->core->core_unit); + mii_bus->priv = bgmac; + mii_bus->read = bgmac_mii_read; + mii_bus->write = bgmac_mii_write; + mii_bus->parent = &bgmac->core->dev; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); + + mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (!mii_bus->irq) { + err = -ENOMEM; + goto err_free_bus; + } + for (i = 0; i < PHY_MAX_ADDR; i++) + mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(mii_bus); + if (err) { + bgmac_err(bgmac, "Registration of mii bus failed\n"); + goto err_free_irq; + } + + bgmac->mii_bus = mii_bus; + + return err; + +err_free_irq: + kfree(mii_bus->irq); +err_free_bus: + mdiobus_free(mii_bus); + return err; +} + +static void bgmac_mii_unregister(struct bgmac *bgmac) +{ + struct mii_bus *mii_bus = bgmac->mii_bus; + + mdiobus_unregister(mii_bus); + kfree(mii_bus->irq); + mdiobus_free(mii_bus); +} + +/************************************************** * BCMA bus ops **************************************************/ @@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core) if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); + err = bgmac_mii_register(bgmac); + if (err) { + bgmac_err(bgmac, "Cannot register MDIO\n"); + err = -ENOTSUPP; + goto err_dma_free; + } + err = register_netdev(bgmac->net_dev); if (err) { bgmac_err(bgmac, "Cannot register net device\n"); err = -ENOTSUPP; - goto err_dma_free; + goto err_mii_unregister; } netif_carrier_off(net_dev); @@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core) return 0; +err_mii_unregister: + bgmac_mii_unregister(bgmac); err_dma_free: bgmac_dma_free(bgmac); @@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core) netif_napi_del(&bgmac->napi); unregister_netdev(bgmac->net_dev); + bgmac_mii_unregister(bgmac); bgmac_dma_free(bgmac); bcma_set_drvdata(core, NULL); free_netdev(bgmac->net_dev); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 4ede614c81f8..98d4b5fcc070 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -399,6 +399,7 @@ struct bgmac { struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ struct net_device *net_dev; struct napi_struct napi; + struct mii_bus *mii_bus; /* DMA */ struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e4605a965084..9577cceafac4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t { struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ -#define BNX2X_NAPI_WEIGHT 128 struct napi_struct napi; union host_hc_status_block status_blk; /* chip independed shortcuts into sb structure */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index aee7671ff4c1..8d158d8240a2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -834,7 +834,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp) /* Add NAPI objects */ for_each_rx_queue_cnic(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, BNX2X_NAPI_WEIGHT); + bnx2x_poll, NAPI_POLL_WEIGHT); } static inline void bnx2x_add_all_napi(struct bnx2x *bp) @@ -844,7 +844,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) /* Add 
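bgmac_mii_register() above follows the stock recipe for exposing a MAC's internal PHY access as an mii_bus. A condensed, hedged sketch of the same sequence; the read/write callbacks, bus name and priv pointer are placeholders:

#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/err.h>

static struct mii_bus *example_mii_register(struct device *parent, void *priv,
					    int (*rd)(struct mii_bus *, int, int),
					    int (*wr)(struct mii_bus *, int, int, u16))
{
	struct mii_bus *bus;
	int i, err;

	bus = mdiobus_alloc();
	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus->name = "example mii bus";
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-%d", 0);
	bus->priv = priv;
	bus->read = rd;
	bus->write = wr;
	bus->parent = parent;

	/* this era expects the driver to supply the per-PHY irq table */
	bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		bus->irq[i] = PHY_POLL;		/* no PHY interrupt lines wired up */

	err = mdiobus_register(bus);
	if (err)
		goto err_free_irq;

	return bus;	/* teardown mirrors this: unregister, kfree(irq), mdiobus_free */

err_free_irq:
	kfree(bus->irq);
err_free_bus:
	mdiobus_free(bus);
	return ERR_PTR(err);
}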
NAPI objects */ for_each_eth_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, BNX2X_NAPI_WEIGHT); + bnx2x_poll, NAPI_POLL_WEIGHT); } static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index e9b35da375cb..e80bfb60c3ef 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + NET_IP_ALIGN); - if (sb_new == NULL) { - pr_info("%s: sk_buff allocation failed\n", - d->sbdma_eth->sbm_dev->name); + if (sb_new == NULL) return -ENOBUFS; - } sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 93729f942358..0c1a2ef163a5 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2) #define FIRMWARE_TG3 "tigon/tg3.bin" +#define FIRMWARE_TG357766 "tigon/tg357766.bin" #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" @@ -3448,11 +3449,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) #define TX_CPU_SCRATCH_SIZE 0x04000 /* tp->lock is held. */ -static int tg3_halt_cpu(struct tg3 *tp, u32 offset) +static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) { int i; + const int iters = 10000; - BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + for (i = 0; i < iters; i++) { + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) + break; + } + + return (i == iters) ? -EBUSY : 0; +} + +/* tp->lock is held. */ +static int tg3_rxcpu_pause(struct tg3 *tp) +{ + int rc = tg3_pause_cpu(tp, RX_CPU_BASE); + + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + udelay(10); + + return rc; +} + +/* tp->lock is held. */ +static int tg3_txcpu_pause(struct tg3 *tp) +{ + return tg3_pause_cpu(tp, TX_CPU_BASE); +} + +/* tp->lock is held. */ +static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) +{ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); +} + +/* tp->lock is held. */ +static void tg3_rxcpu_resume(struct tg3 *tp) +{ + tg3_resume_cpu(tp, RX_CPU_BASE); +} + +/* tp->lock is held. 
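The two bnx2x hunks just above drop the driver-private BNX2X_NAPI_WEIGHT (128) in favour of NAPI_POLL_WEIGHT, the stock weight of 64 from <linux/netdevice.h>. Illustrative registration with a hypothetical poll handler:

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process at most "budget" rx completions, counting them in work_done ... */
	if (work_done < budget)
		napi_complete(napi);
	return work_done;
}

static void example_add_napi(struct net_device *dev, struct napi_struct *napi)
{
	/* use the kernel-wide default weight rather than a private constant */
	netif_napi_add(dev, napi, example_poll, NAPI_POLL_WEIGHT);
}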
*/ +static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) +{ + int rc; + + BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); if (tg3_asic_rev(tp) == ASIC_REV_5906) { u32 val = tr32(GRC_VCPU_EXT_CTRL); @@ -3460,17 +3508,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); return 0; } - if (offset == RX_CPU_BASE) { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - - tw32(offset + CPU_STATE, 0xffffffff); - tw32_f(offset + CPU_MODE, CPU_MODE_HALT); - udelay(10); + if (cpu_base == RX_CPU_BASE) { + rc = tg3_rxcpu_pause(tp); } else { /* * There is only an Rx CPU for the 5750 derivative in the @@ -3479,17 +3518,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) if (tg3_flag(tp, IS_SSB_CORE)) return 0; - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } + rc = tg3_txcpu_pause(tp); } - if (i >= 10000) { + if (rc) { netdev_err(tp->dev, "%s timed out, %s CPU\n", - __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); return -ENODEV; } @@ -3499,19 +3533,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) return 0; } -struct fw_info { - unsigned int fw_base; - unsigned int fw_len; - const __be32 *fw_data; -}; +static int tg3_fw_data_len(struct tg3 *tp, + const struct tg3_firmware_hdr *fw_hdr) +{ + int fw_len; + + /* Non fragmented firmware have one firmware header followed by a + * contiguous chunk of data to be written. The length field in that + * header is not the length of data to be written but the complete + * length of the bss. The data length is determined based on + * tp->fw->size minus headers. + * + * Fragmented firmware have a main header followed by multiple + * fragments. Each fragment is identical to non fragmented firmware + * with a firmware header followed by a contiguous chunk of data. In + * the main header, the length field is unused and set to 0xffffffff. + * In each fragment header the length is the entire size of that + * fragment i.e. fragment data + header length. Data length is + * therefore length field in the header minus TG3_FW_HDR_LEN. + */ + if (tp->fw_len == 0xffffffff) + fw_len = be32_to_cpu(fw_hdr->len); + else + fw_len = tp->fw->size; + + return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); +} /* tp->lock is held. */ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, int cpu_scratch_size, - struct fw_info *info) + const struct tg3_firmware_hdr *fw_hdr) { - int err, lock_err, i; + int err, i; void (*write_op)(struct tg3 *, u32, u32); + int total_len = tp->fw->size; if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { netdev_err(tp->dev, @@ -3520,30 +3576,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, return -EINVAL; } - if (tg3_flag(tp, 5705_PLUS)) + if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) write_op = tg3_write_mem; else write_op = tg3_write_indirect_reg32; - /* It is possible that bootcode is still loading at this point. - * Get the nvram lock first before halting the cpu. 
- */ - lock_err = tg3_nvram_lock(tp); - err = tg3_halt_cpu(tp, cpu_base); - if (!lock_err) - tg3_nvram_unlock(tp); - if (err) - goto out; + if (tg3_asic_rev(tp) != ASIC_REV_57766) { + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + int lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; - for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) - write_op(tp, cpu_scratch_base + i, 0); - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); - for (i = 0; i < (info->fw_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->fw_base & 0xffff) + - (i * sizeof(u32))), - be32_to_cpu(info->fw_data[i])); + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, + tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); + } else { + /* Subtract additional main header for fragmented firmware and + * advance to the first fragment + */ + total_len -= TG3_FW_HDR_LEN; + fw_hdr++; + } + + do { + u32 *fw_data = (u32 *)(fw_hdr + 1); + for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) + write_op(tp, cpu_scratch_base + + (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + + (i * sizeof(u32)), + be32_to_cpu(fw_data[i])); + + total_len -= be32_to_cpu(fw_hdr->len); + + /* Advance to next fragment */ + fw_hdr = (struct tg3_firmware_hdr *) + ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); + } while (total_len > 0); err = 0; @@ -3552,13 +3627,33 @@ out: } /* tp->lock is held. */ +static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) +{ + int i; + const int iters = 5; + + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, pc); + + for (i = 0; i < iters; i++) { + if (tr32(cpu_base + CPU_PC) == pc) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, pc); + udelay(1000); + } + + return (i == iters) ? -EBUSY : 0; +} + +/* tp->lock is held. */ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) { - struct fw_info info; - const __be32 *fw_data; - int err, i; + const struct tg3_firmware_hdr *fw_hdr; + int err; - fw_data = (void *)tp->fw->data; + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; /* Firmware blob starts with version numbers, followed by start address and length. We are setting complete length. @@ -3566,60 +3661,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) Remainder is the blob to be loaded contiguously from start address. */ - info.fw_base = be32_to_cpu(fw_data[1]); - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; - err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, - &info); + fw_hdr); if (err) return err; err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, - &info); + fw_hdr); if (err) return err; /* Now startup only the RX cpu. 
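The do/while loop above is the core of the new fragmented-firmware handling: a tg3_firmware_hdr (version/base_addr/len, all big-endian; the struct is added to tg3.h later in this diff) is followed either by one contiguous payload or, for the 57766, by a chain of per-fragment headers whose len field covers header plus payload. A hedged, stand-alone sketch of walking such an image; everything except the header layout is illustrative:

#include <linux/types.h>
#include <asm/byteorder.h>

struct tg3_firmware_hdr {
	__be32 version;		/* unused for fragments */
	__be32 base_addr;
	__be32 len;
};
#define TG3_FW_HDR_LEN	(sizeof(struct tg3_firmware_hdr))

static void example_walk_fragments(const void *image, size_t size)
{
	const struct tg3_firmware_hdr *hdr = image;
	size_t total = size - TG3_FW_HDR_LEN;	/* main header's len is 0xffffffff, skip it */

	hdr++;					/* advance to the first fragment */
	while (total > 0) {
		u32 frag_len = be32_to_cpu(hdr->len);
		const __be32 *data = (const __be32 *)(hdr + 1);
		u32 words = (frag_len - TG3_FW_HDR_LEN) / sizeof(u32);

		/* write "words" big-endian words starting at be32_to_cpu(hdr->base_addr) */
		(void)data;
		(void)words;

		total -= frag_len;
		hdr = (const void *)hdr + frag_len;
	}
}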
*/ - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) - break; - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { + err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, + be32_to_cpu(fw_hdr->base_addr)); + if (err) { netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " "should be %08x\n", __func__, - tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + tr32(RX_CPU_BASE + CPU_PC), + be32_to_cpu(fw_hdr->base_addr)); return -ENODEV; } - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + tg3_rxcpu_resume(tp); return 0; } +static int tg3_validate_rxcpu_state(struct tg3 *tp) +{ + const int iters = 1000; + int i; + u32 val; + + /* Wait for boot code to complete initialization and enter service + * loop. It is then safe to download service patches + */ + for (i = 0; i < iters; i++) { + if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) + break; + + udelay(10); + } + + if (i == iters) { + netdev_err(tp->dev, "Boot code not ready for service patches\n"); + return -EBUSY; + } + + val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); + if (val & 0xff) { + netdev_warn(tp->dev, + "Other patches exist. Not downloading EEE patch\n"); + return -EEXIST; + } + + return 0; +} + +/* tp->lock is held. */ +static void tg3_load_57766_firmware(struct tg3 *tp) +{ + struct tg3_firmware_hdr *fw_hdr; + + if (!tg3_flag(tp, NO_NVRAM)) + return; + + if (tg3_validate_rxcpu_state(tp)) + return; + + if (!tp->fw) + return; + + /* This firmware blob has a different format than older firmware + * releases as given below. The main difference is we have fragmented + * data to be written to non-contiguous locations. + * + * In the beginning we have a firmware header identical to other + * firmware which consists of version, base addr and length. The length + * here is unused and set to 0xffffffff. + * + * This is followed by a series of firmware fragments which are + * individually identical to previous firmware. i.e. they have the + * firmware header and followed by data for that fragment. The version + * field of the individual fragment header is unused. + */ + + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; + if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) + return; + + if (tg3_rxcpu_pause(tp)) + return; + + /* tg3_load_firmware_cpu() will always succeed for the 57766 */ + tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); + + tg3_rxcpu_resume(tp); +} + /* tp->lock is held. */ static int tg3_load_tso_firmware(struct tg3 *tp) { - struct fw_info info; - const __be32 *fw_data; + const struct tg3_firmware_hdr *fw_hdr; unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; - int err, i; + int err; - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) + if (!tg3_flag(tp, FW_TSO)) return 0; - fw_data = (void *)tp->fw->data; + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; /* Firmware blob starts with version numbers, followed by start address and length. We are setting complete length. @@ -3627,10 +3779,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp) Remainder is the blob to be loaded contiguously from start address. 
*/ - info.fw_base = be32_to_cpu(fw_data[1]); cpu_scratch_size = tp->fw_len; - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; if (tg3_asic_rev(tp) == ASIC_REV_5705) { cpu_base = RX_CPU_BASE; @@ -3643,30 +3792,22 @@ static int tg3_load_tso_firmware(struct tg3 *tp) err = tg3_load_firmware_cpu(tp, cpu_base, cpu_scratch_base, cpu_scratch_size, - &info); + fw_hdr); if (err) return err; /* Now startup the cpu. */ - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(cpu_base + CPU_PC) == info.fw_base) - break; - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); - tw32_f(cpu_base + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { + err = tg3_pause_cpu_and_set_pc(tp, cpu_base, + be32_to_cpu(fw_hdr->base_addr)); + if (err) { netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n", - __func__, tr32(cpu_base + CPU_PC), info.fw_base); + __func__, tr32(cpu_base + CPU_PC), + be32_to_cpu(fw_hdr->base_addr)); return -ENODEV; } - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_MODE, 0x00000000); + + tg3_resume_cpu(tp, cpu_base); return 0; } @@ -9773,6 +9914,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) return err; } + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + /* Ignore any errors for the firmware download. If download + * fails, the device will operate with EEE disabled + */ + tg3_load_57766_firmware(tp); + } + if (tg3_flag(tp, TSO_CAPABLE)) { err = tg3_load_tso_firmware(tp); if (err) @@ -10562,7 +10710,7 @@ static int tg3_test_msi(struct tg3 *tp) static int tg3_request_firmware(struct tg3 *tp) { - const __be32 *fw_data; + const struct tg3_firmware_hdr *fw_hdr; if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", @@ -10570,15 +10718,15 @@ static int tg3_request_firmware(struct tg3 *tp) return -ENOENT; } - fw_data = (void *)tp->fw->data; + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; /* Firmware blob starts with version numbers, followed by * start address and _full_ length including BSS sections * (which must be longer than the actual data, of course */ - tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ - if (tp->fw_len < (tp->fw->size - 12)) { + tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ + if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { netdev_err(tp->dev, "bogus length %d in \"%s\"\n", tp->fw_len, tp->fw_needed); release_firmware(tp->fw); @@ -10877,7 +11025,15 @@ static int tg3_open(struct net_device *dev) if (tp->fw_needed) { err = tg3_request_firmware(tp); - if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + if (err) { + netdev_warn(tp->dev, "EEE capability disabled\n"); + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "EEE capability restored\n"); + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + } + } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { if (err) return err; } else if (err) { @@ -14507,6 +14663,7 @@ static int tg3_phy_probe(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && (tg3_asic_rev(tp) == ASIC_REV_5719 || tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_57766 || tg3_asic_rev(tp) == ASIC_REV_5762 || (tg3_asic_rev(tp) == ASIC_REV_5717 && tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || @@ -15289,7 +15446,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct 
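tg3_request_firmware() above now validates the blob through the typed header instead of raw word offsets: the header's len field covers the BSS as well, so it must be at least as large as the payload that follows the header in the file. A minimal sketch of that check, reusing the tg3_firmware_hdr layout shown earlier (device handling trimmed, names illustrative):

#include <linux/firmware.h>

static int example_request_fw(struct device *dev, const char *name,
			      const struct firmware **fw_out, u32 *fw_len)
{
	const struct tg3_firmware_hdr *hdr;
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, dev);
	if (err)
		return err;

	hdr = (const struct tg3_firmware_hdr *)fw->data;
	*fw_len = be32_to_cpu(hdr->len);	/* includes bss */
	if (*fw_len < fw->size - TG3_FW_HDR_LEN) {
		release_firmware(fw);
		return -EINVAL;			/* bogus length in the image */
	}

	*fw_out = fw;
	return 0;
}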
pci_device_id *ent) } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && tg3_asic_rev(tp) != ASIC_REV_5701 && tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { - tg3_flag_set(tp, TSO_BUG); + tg3_flag_set(tp, FW_TSO); + tg3_flag_set(tp, TSO_BUG); if (tg3_asic_rev(tp) == ASIC_REV_5705) tp->fw_needed = FIRMWARE_TG3TSO5; else @@ -15300,7 +15458,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3) || - tp->fw_needed) { + tg3_flag(tp, FW_TSO)) { /* For firmware TSO, assume ASF is disabled. * We'll disable TSO later if we discover ASF * is enabled in tg3_get_eeprom_hw_cfg(). @@ -15315,6 +15473,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) tp->fw_needed = FIRMWARE_TG3; + if (tg3_asic_rev(tp) == ASIC_REV_57766) + tp->fw_needed = FIRMWARE_TG357766; + tp->irq_max = 1; if (tg3_flag(tp, 5750_PLUS)) { @@ -15587,7 +15748,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) */ tg3_get_eeprom_hw_cfg(tp); - if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { + if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { tg3_flag_clear(tp, TSO_CAPABLE); tg3_flag_clear(tp, TSO_BUG); tp->fw_needed = NULL; @@ -15775,6 +15936,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) udelay(50); tg3_nvram_init(tp); + /* If the device has an NVRAM, no need to load patch firmware */ + if (tg3_asic_rev(tp) == ASIC_REV_57766 && + !tg3_flag(tp, NO_NVRAM)) + tp->fw_needed = NULL; + grc_misc_cfg = tr32(GRC_MISC_CFG); grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 8d7d4c2ab5d6..1cdc1b641c77 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2222,6 +2222,12 @@ #define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 #define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 +#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000 +#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000 +#define TG3_57766_FW_BASE_ADDR 0x00030000 +#define TG3_57766_FW_HANDSHAKE 0x0003fccc +#define TG3_SBROM_IN_SERVICE_LOOP 0x51 + #define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 #define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 #define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 @@ -3009,17 +3015,18 @@ enum TG3_FLAGS { TG3_FLAG_JUMBO_CAPABLE, TG3_FLAG_CHIP_RESETTING, TG3_FLAG_INIT_COMPLETE, - TG3_FLAG_TSO_BUG, TG3_FLAG_MAX_RXPEND_64, - TG3_FLAG_TSO_CAPABLE, TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ TG3_FLAG_ASF_NEW_HANDSHAKE, TG3_FLAG_HW_AUTONEG, TG3_FLAG_IS_NIC, TG3_FLAG_FLASH, + TG3_FLAG_FW_TSO, TG3_FLAG_HW_TSO_1, TG3_FLAG_HW_TSO_2, TG3_FLAG_HW_TSO_3, + TG3_FLAG_TSO_CAPABLE, + TG3_FLAG_TSO_BUG, TG3_FLAG_ICH_WORKAROUND, TG3_FLAG_1SHOT_MSI, TG3_FLAG_NO_FWARE_REPORTED, @@ -3064,6 +3071,13 @@ enum TG3_FLAGS { TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ }; +struct tg3_firmware_hdr { + __be32 version; /* unused for fragments */ + __be32 base_addr; + __be32 len; +}; +#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr)) + struct tg3 { /* begin "general, frequently-used members" cacheline section */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 3227fdde521b..f2b73ffa9122 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc 
*ioc); static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); -static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, +static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, u32 boot_param); static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 3becdb2deb46..5bd7786e8413 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -209,7 +209,6 @@ static void at91ether_rx(struct net_device *dev) netif_rx(skb); } else { lp->stats.rx_dropped++; - netdev_notice(dev, "Memory squeeze, dropping packet.\n"); } if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) @@ -519,18 +518,7 @@ static struct platform_driver at91ether_driver = { }, }; -static int __init at91ether_init(void) -{ - return platform_driver_probe(&at91ether_driver, at91ether_probe); -} - -static void __exit at91ether_exit(void) -{ - platform_driver_unregister(&at91ether_driver); -} - -module_init(at91ether_init) -module_exit(at91ether_exit) +module_platform_driver_probe(at91ether_driver, at91ether_probe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 79039439bfdc..3a5d680ff8f9 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1737,18 +1737,7 @@ static struct platform_driver macb_driver = { }, }; -static int __init macb_init(void) -{ - return platform_driver_probe(&macb_driver, macb_probe); -} - -static void __exit macb_exit(void) -{ - platform_driver_unregister(&macb_driver); -} - -module_init(macb_init); -module_exit(macb_exit); +module_platform_driver_probe(macb_driver, macb_probe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 138446957786..aaa0499aa19c 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -478,9 +478,6 @@ dma_rx(struct net_device *dev) /* Malloc up new buffer. */ skb = netdev_alloc_skb(dev, length + 2); if (skb == NULL) { - /* I don't think we want to do this to a stressed system */ - cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n", - dev->name); dev->stats.rx_dropped++; /* AKPM: advance bp to the next frame */ @@ -731,9 +728,6 @@ net_rx(struct net_device *dev) /* Malloc up new buffer. 
*/ skb = netdev_alloc_skb(dev, length + 2); if (skb == NULL) { -#if 0 /* Again, this seems a cruel thing to do */ - pr_warn("%s: Memory squeeze, dropping packet\n", dev->name); -#endif dev->stats.rx_dropped++; return; } @@ -1978,18 +1972,6 @@ static struct platform_driver cs89x0_driver = { .remove = cs89x0_platform_remove, }; -static int __init cs89x0_init(void) -{ - return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe); -} - -module_init(cs89x0_init); - -static void __exit cs89x0_cleanup(void) -{ - platform_driver_unregister(&cs89x0_driver); -} - -module_exit(cs89x0_cleanup); +module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); #endif /* CONFIG_CS89x0_PLATFORM */ diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 110d26f4c602..afa8e3af2c4d 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -580,12 +580,9 @@ alloc_list (struct net_device *dev) skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); np->rx_skbuff[i] = skb; - if (skb == NULL) { - printk (KERN_ERR - "%s: alloc_list: allocate Rx buffer error! ", - dev->name); + if (skb == NULL) break; - } + /* Rubicon now supports 40 bits of addressing space. */ np->rx_ring[i].fraginfo = cpu_to_le64 ( pci_map_single ( diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 29aff55f2eea..2e2700e3a5ab 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 3c9b4f12e3e5..6ed46396bb36 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 96970860c915..6ef4575ce1c8 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 76b302f30c87..053f00d006c0 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 62dc220695f7..89e6d8cfaf0d 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
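The at91_ether, macb and cs89x0 hunks above replace hand-rolled module_init()/module_exit() pairs with module_platform_driver_probe(), which generates exactly that boilerplate around platform_driver_probe() for devices guaranteed to exist at init time. A hedged skeleton of a converted driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* one-shot probe; never re-run, so it may live in __init */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

/* expands to the module_init()/module_exit() pair that calls
 * platform_driver_probe(&example_driver, example_probe) and
 * platform_driver_unregister(&example_driver) */
module_platform_driver_probe(example_driver, example_probe);

MODULE_LICENSE("GPL");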
* * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08e54f3d288b..b8e5019398f0 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 55d32aa0a093..f3d126dcc104 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index db4ea8081c07..276572998463 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 069a155d16ed..f97c60f78089 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -743,8 +743,6 @@ fec_enet_rx(struct net_device *ndev, int budget) skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); if (unlikely(!skb)) { - printk("%s: Memory squeeze, dropping packet.\n", - ndev->name); ndev->stats.rx_dropped++; } else { skb_reserve(skb, NET_IP_ALIGN); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 46df28893c10..edc120094c34 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -177,8 +177,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) received++; netif_receive_skb(skb); } else { - dev_warn(fep->dev, - "Memory squeeze, dropping packet.\n"); fep->stats.rx_dropped++; skbn = skb; } @@ -309,8 +307,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev) received++; netif_rx(skb); } else { - dev_warn(fep->dev, - "Memory squeeze, dropping packet.\n"); fep->stats.rx_dropped++; skbn = skb; } @@ -505,11 +501,9 @@ void fs_init_bds(struct net_device *dev) */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); - if (skb == NULL) { - dev_warn(fep->dev, - "Memory squeeze, unable to allocate skb\n"); + if (skb == NULL) break; - } + skb_align(skb, ENET_RX_ALIGN); fep->rx_skbuff[i] = skb; CBDW_BUFADDR(bdp, @@ -593,13 +587,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, /* Alloc new skb */ new_skb = netdev_alloc_skb(dev, skb->len + 4); - if (!new_skb) { - if (net_ratelimit()) { - dev_warn(fep->dev, - "Memory squeeze, dropping tx packet.\n"); - } + if (!new_skb) return NULL; - } /* Make sure new skb is properly aligned */ skb_align(new_skb, 4); diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index 2418faf2251a..84125707f321 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -1003,8 
+1003,6 @@ static void fjn_rx(struct net_device *dev) } skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n", - pkt_len); outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_dropped++; break; diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index 1c54e229e3cc..e38816145395 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -798,16 +798,14 @@ static inline int i596_rx(struct net_device *dev) #ifdef __mc68000__ cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); #endif - } - else + } else { skb = netdev_alloc_skb(dev, pkt_len + 2); + } memory_squeeze: if (skb == NULL) { /* XXX tulip.c can defer packets here!! */ - printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; - } - else { + } else { if (!rx_in_place) { /* 16 byte align the data fields */ skb_reserve(skb, 2); diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index f045ea4dc514..d653bac4cfc4 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -715,14 +715,12 @@ static inline int i596_rx(struct net_device *dev) rbd->v_data = newskb->data; rbd->b_data = SWAP32(dma_addr); DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); - } else + } else { skb = netdev_alloc_skb_ip_align(dev, pkt_len); + } memory_squeeze: if (skb == NULL) { /* XXX tulip.c can defer packets here!! */ - printk(KERN_ERR - "%s: i596_rx Memory squeeze, dropping packet.\n", - dev->name); dev->stats.rx_dropped++; } else { if (!rx_in_place) { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 328f47c92e26..029633434474 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -402,7 +402,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) skb_arr_rq1[index] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb_arr_rq1[index]) { - netdev_info(dev, "Unable to allocate enough skb in the array\n"); pr->rq1_skba.os_skbs = fill_wqes - i; break; } @@ -432,10 +431,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) for (i = 0; i < nr_rq1a; i++) { skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb_arr_rq1[i]) { - netdev_info(dev, "Not enough memory to allocate skb array\n"); + if (!skb_arr_rq1[i]) break; - } } /* Ring doorbell */ ehea_update_rq1a(pr->qp, i - 1); @@ -695,10 +692,8 @@ static int ehea_proc_rwqes(struct net_device *dev, skb = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb) { - netdev_err(dev, "Not enough memory to allocate skb\n"); + if (!skb) break; - } } skb_copy_to_linear_data(skb, ((char *)cqe) + 64, cqe->num_bytes_transfered - 4); diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index e0991388664c..b71c8502a2b3 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -37,7 +37,9 @@ * "index + 5". 
*/ static const u16 e1000_gg82563_cable_length_table[] = { - 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF +}; + #define GG82563_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_gg82563_cable_length_table) @@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); + E1000_EECD_SIZE_EX_SHIFT); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. @@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ - udelay(200); + usleep_range(200, 400); /* ...and verify the command was successful. */ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); @@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, return -E1000_ERR_PHY; } - udelay(200); + usleep_range(200, 400); ret_val = e1000e_read_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & offset, + data); - udelay(200); + usleep_range(200, 400); } else { ret_val = e1000e_read_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & offset, + data); } e1000_release_phy_80003es2lan(hw); @@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ - udelay(200); + usleep_range(200, 400); /* ...and verify the command was successful. */ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); @@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, return -E1000_ERR_PHY; } - udelay(200); + usleep_range(200, 400); ret_val = e1000e_write_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & + offset, data); - udelay(200); + usleep_range(200, 400); } else { ret_val = e1000e_write_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & + offset, data); } e1000_release_phy_80003es2lan(hw); @@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; @@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; } @@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, s32 ret_val; if (hw->phy.media_type == e1000_media_type_copper) { - ret_val = e1000e_get_speed_and_duplex_copper(hw, - speed, - duplex); + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); hw->phy.ops.cfg_on_link_up(hw); } else { ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, - speed, - duplex); + speed, + duplex); } return ret_val; @@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); - /* This is not fatal 
and we should not stop init due to this */ /* Disabling VLAN filtering */ e_dbg("Initializing the IEEE VLAN\n"); @@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(0), reg_data); /* ...for both queues. */ reg_data = er32(TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(1), reg_data); /* Enable retransmit on late collisions */ @@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* default to true to enable the MDIC W/A */ hw->dev_spec.e80003es2lan.mdic_wa_enable = true; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET >> - E1000_KMRNCTRLSTA_OFFSET_SHIFT, - &i); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); if (!ret_val) { if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == - E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) hw->dev_spec.e80003es2lan.mdic_wa_enable = false; } @@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; - u32 ctrl_ext; + u32 reg; u16 data; ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); @@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) } /* Bypass Rx and Tx FIFO's */ - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, - E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | - E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, - &data); + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); if (ret_val) return ret_val; data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, - data); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) return ret_val; @@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) if (ret_val) return ret_val; - ctrl_ext = er32(CTRL_EXT); - ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); - ew32(CTRL_EXT, ctrl_ext); + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + ew32(CTRL_EXT, reg); ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); if (ret_val) @@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) * polling the phy; this fixes erroneous timeouts at 10Mbps. 
*/ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), - 0xFFFF); + 0xFFFF); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - &reg_data); + &reg_data); if (ret_val) return ret_val; reg_data |= 0x3F; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - reg_data); + reg_data); if (ret_val) return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, - &reg_data); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + &reg_data); if (ret_val) return ret_val; reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, - reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); if (ret_val) return ret_val; @@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) if (hw->phy.media_type == e1000_media_type_copper) { ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, - &duplex); + &duplex); if (ret_val) return ret_val; @@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) u16 reg_data, reg_data2; reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, - reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); if (ret_val) return ret_val; @@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) u32 i = 0; reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, - reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); if (ret_val) return ret_val; @@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); @@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); @@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = { .phy_ops = &es2_phy_ops, .nvm_ops = &es2_nvm_ops, }; - diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 2faffbde179e..7380442a3829 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) default: nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); + E1000_EECD_SIZE_EX_SHIFT); /* Added to a constant, "size" becomes the left-shift value * for setting word_size.
*/ @@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) return ret_val; phy->id = (u32)(phy_id << 16); - udelay(20); + usleep_range(20, 40); ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; @@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) if (!(swsm & E1000_SWSM_SMBI)) break; - udelay(50); + usleep_range(50, 100); i++; } @@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) if (er32(SWSM) & E1000_SWSM_SWESMBI) break; - udelay(50); + usleep_range(50, 100); } if (i == fw_timeout) { @@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); ew32(SWSM, swsm); } + /** * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore * @hw: pointer to the HW structure @@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, } for (i = 0; i < words; i++) { - eewr = (data[i] << E1000_NVM_RW_REG_DATA) | - ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | - E1000_NVM_RW_REG_START; + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); if (ret_val) @@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) s32 timeout = PHY_CFG_TIMEOUT; while (timeout) { - if (er32(EEMNGCTL) & - E1000_NVM_CFG_DONE_PORT_0) + if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) break; usleep_range(1000, 2000); timeout--; @@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) } if (hw->nvm.type == e1000_nvm_flash_hw) { - udelay(10); + usleep_range(10, 20); ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_EE_RST; ew32(CTRL_EXT, ctrl_ext); @@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ /* Disabling VLAN filtering */ e_dbg("Initializing the IEEE VLAN\n"); @@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | - E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(0), reg_data); /* ...for both queues. 
*/ @@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) break; default: reg_data = er32(TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | - E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(1), reg_data); break; } @@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) status = er32(STATUS); er32(RXCW); /* SYNCH bit and IV bit are sticky */ - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { @@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * the IV bit and restart Autoneg */ for (i = 0; i < AN_RETRY_COUNT; i++) { - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && (rxcw & E1000_RXCW_C)) @@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = { .phy_ops = &e82_phy_ops_bm, .nvm_ops = &e82571_nvm_ops, }; - diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index 85cb1a3b7cd4..08e24dc3dc0e 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -44,6 +44,8 @@ #define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ #define E1000_EIAC_MASK_82574 0x01F00000 +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + /* Manageability Operation Mode mask */ #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index fc3a4fe1ac71..b7c664f2a95f 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -66,7 +66,7 @@ #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ -#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ #define E1000_CTRL_EXT_LSECCK 0x00001000 #define E1000_CTRL_EXT_PHYPDEN 0x00100000 @@ -216,6 +216,8 @@ #define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ #define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ @@ -239,12 +241,11 @@ #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ #define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ -#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */ #define HALF_DUPLEX 1 #define FULL_DUPLEX 2 - #define ADVERTISE_10_HALF 0x0001 #define ADVERTISE_10_FULL 0x0002 #define ADVERTISE_100_HALF 0x0004 @@ -311,6 +312,7 @@ /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 /* Receive Checksum Control */ #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ @@ -400,7 +402,8 @@ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ -#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ @@ -583,13 +586,13 @@ #define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ #define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) -#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ -#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -#define E1000_NVM_RW_REG_START 1 /* Start operation */ -#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ -#define E1000_FLASH_UPDATES 2000 +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */ +#define E1000_FLASH_UPDATES 2000 /* NVM Word Offsets */ #define NVM_COMPAT 0x0003 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index fcc758138b8a..e371f771cf8b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -61,7 +61,6 @@ struct e1000_info; #define e_notice(format, arg...) 
\ netdev_notice(adapter->netdev, format, ## arg) - /* Interrupt modes, as used by the IntMode parameter */ #define E1000E_INT_MODE_LEGACY 0 #define E1000E_INT_MODE_MSI 1 @@ -239,9 +238,8 @@ struct e1000_adapter { u16 tx_itr; u16 rx_itr; - /* Tx */ - struct e1000_ring *tx_ring /* One per active queue */ - ____cacheline_aligned_in_smp; + /* Tx - one ring per active queue */ + struct e1000_ring *tx_ring ____cacheline_aligned_in_smp; u32 tx_fifo_limit; struct napi_struct napi; @@ -487,8 +485,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring); extern void e1000e_free_rx_resources(struct e1000_ring *ring); extern void e1000e_free_tx_resources(struct e1000_ring *ring); extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 - *stats); + struct rtnl_link_stats64 + *stats); extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_get_hw_control(struct e1000_adapter *adapter); @@ -558,12 +556,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) return hw->nvm.ops.update(hw); } -static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) { return hw->nvm.ops.read(hw, offset, words, data); } -static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) { return hw->nvm.ops.write(hw, offset, words, data); } @@ -597,7 +597,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw) s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) - udelay(50); + usleep_range(50, 100); return i; } diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index f91a8f3f9d48..e835e7b95f81 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -40,7 +40,7 @@ #include "e1000.h" -enum {NETDEV_STATS, E1000_STATS}; +enum { NETDEV_STATS, E1000_STATS }; struct e1000_stats { char stat_string[ETH_GSTRING_LEN]; @@ -121,6 +121,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; + #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) static int e1000_get_settings(struct net_device *netdev, @@ -197,8 +198,7 @@ static int e1000_get_settings(struct net_device *netdev, /* MDI-X => 2; MDI =>1; Invalid =>0 */ if ((hw->phy.media_type == e1000_media_type_copper) && netif_carrier_ok(netdev)) - ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : - ETH_TP_MDI; + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : ETH_TP_MDI; else ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; @@ -224,8 +224,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) /* Fiber NICs only allow 1000 gbps Full duplex */ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && - spd != SPEED_1000 && - dplx != DUPLEX_FULL) { + (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) { goto err_inval; } @@ -298,12 +297,10 @@ static int e1000_set_settings(struct net_device *netdev, hw->mac.autoneg = 1; if (hw->phy.media_type == e1000_media_type_fiber) hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg; + ADVERTISED_FIBRE | ADVERTISED_Autoneg; else hw->phy.autoneg_advertised = ecmd->advertising | - ADVERTISED_TP | - ADVERTISED_Autoneg; + ADVERTISED_TP | ADVERTISED_Autoneg; ecmd->advertising = hw->phy.autoneg_advertised; if (adapter->fc_autoneg) hw->fc.requested_mode = e1000_fc_default; @@ -346,7 +343,7 @@ static void e1000_get_pauseparam(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; pause->autoneg = - (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); if (hw->fc.current_mode == e1000_fc_rx_pause) { pause->rx_pause = 1; @@ -435,7 +432,7 @@ static void e1000_get_regs(struct net_device *netdev, memset(p, 0, E1000_REGS_LEN * sizeof(u32)); regs->version = (1 << 24) | (adapter->pdev->revision << 16) | - adapter->pdev->device; + adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); @@ -503,8 +500,8 @@ static int e1000_get_eeprom(struct net_device *netdev, first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(sizeof(u16) * - (last_word - first_word + 1), GFP_KERNEL); + eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1), + GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; @@ -515,7 +512,7 @@ static int e1000_get_eeprom(struct net_device *netdev, } else { for (i = 0; i < last_word - first_word + 1; i++) { ret_val = e1000_read_nvm(hw, first_word + i, 1, - &eeprom_buff[i]); + &eeprom_buff[i]); if (ret_val) break; } @@ -553,7 +550,8 @@ static int e1000_set_eeprom(struct net_device *netdev, if (eeprom->len == 0) return -EOPNOTSUPP; - if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) + if (eeprom->magic != + (adapter->pdev->vendor | (adapter->pdev->device << 16))) return -EFAULT; if (adapter->flags & FLAG_READ_ONLY_NVM) @@ -579,7 +577,7 @@ static int e1000_set_eeprom(struct net_device *netdev, /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ ret_val = e1000_read_nvm(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); + &eeprom_buff[last_word - first_word]); if (ret_val) goto out; @@ -618,8 +616,7 @@ static void e1000_get_drvinfo(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, e1000e_driver_name, - sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, e1000e_driver_version, sizeof(drvinfo->version)); @@ -627,10 +624,10 @@ static void e1000_get_drvinfo(struct net_device *netdev, * PCI-E controllers */ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d-%d", - (adapter->eeprom_vers & 0xF000) >> 12, - (adapter->eeprom_vers & 0x0FF0) >> 4, - (adapter->eeprom_vers & 0x000F)); + "%d.%d-%d", + (adapter->eeprom_vers & 0xF000) >> 12, + (adapter->eeprom_vers & 
0x0FF0) >> 4, + (adapter->eeprom_vers & 0x000F)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); @@ -756,7 +753,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, { u32 pat, val; static const u32 test[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; for (pat = 0; pat < ARRAY_SIZE(test); pat++) { E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, (test[pat] & write)); @@ -786,6 +784,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, } return 0; } + #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ do { \ if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ @@ -813,16 +812,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) u32 wlock_mac = 0; /* The status register is Read Only, so a write should fail. - * Some bits that get toggled are ignored. + * Some bits that get toggled are ignored. There are several bits + * on newer hardware that are r/w. */ switch (mac->type) { - /* there are several bits on newer hardware that are r/w */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: toggle = 0x7FFFF3FF; break; - default: + default: toggle = 0x7FFFF033; break; } @@ -928,7 +927,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) } /* If Checksum is not Correct return error else test passed */ - if ((checksum != (u16) NVM_SUM) && !(*data)) + if ((checksum != (u16)NVM_SUM) && !(*data)) *data = 2; return *data; @@ -936,7 +935,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) { - struct net_device *netdev = (struct net_device *) data; + struct net_device *netdev = (struct net_device *)data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -969,8 +968,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, netdev)) { shared_int = 0; - } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, - netdev->name, netdev)) { + } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name, + netdev)) { *data = 1; ret_val = -1; goto out; @@ -1080,28 +1079,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter) struct e1000_ring *tx_ring = &adapter->test_tx_ring; struct e1000_ring *rx_ring = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; int i; if (tx_ring->desc && tx_ring->buffer_info) { for (i = 0; i < tx_ring->count; i++) { - if (tx_ring->buffer_info[i].dma) + buffer_info = &tx_ring->buffer_info[i]; + + if (buffer_info->dma) dma_unmap_single(&pdev->dev, - tx_ring->buffer_info[i].dma, - tx_ring->buffer_info[i].length, - DMA_TO_DEVICE); - if (tx_ring->buffer_info[i].skb) - dev_kfree_skb(tx_ring->buffer_info[i].skb); + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); } } if (rx_ring->desc && rx_ring->buffer_info) { for (i = 0; i < rx_ring->count; i++) { - if (rx_ring->buffer_info[i].dma) + buffer_info = &rx_ring->buffer_info[i]; + + if (buffer_info->dma) dma_unmap_single(&pdev->dev, - rx_ring->buffer_info[i].dma, - 2048, DMA_FROM_DEVICE); - if (rx_ring->buffer_info[i].skb) - dev_kfree_skb(rx_ring->buffer_info[i].skb); + buffer_info->dma, + 2048, DMA_FROM_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); } } 
@@ -1138,8 +1142,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->count = E1000_DEFAULT_TXD; tx_ring->buffer_info = kcalloc(tx_ring->count, - sizeof(struct e1000_buffer), - GFP_KERNEL); + sizeof(struct e1000_buffer), GFP_KERNEL); if (!tx_ring->buffer_info) { ret_val = 1; goto err_nomem; @@ -1156,8 +1159,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); - ew32(TDBAH(0), ((u64) tx_ring->dma >> 32)); + ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH(0), ((u64)tx_ring->dma >> 32)); ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); ew32(TDH(0), 0); ew32(TDT(0), 0); @@ -1179,8 +1182,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->buffer_info[i].skb = skb; tx_ring->buffer_info[i].length = skb->len; tx_ring->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, tx_ring->buffer_info[i].dma)) { ret_val = 4; @@ -1200,8 +1203,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rx_ring->count = E1000_DEFAULT_RXD; rx_ring->buffer_info = kcalloc(rx_ring->count, - sizeof(struct e1000_buffer), - GFP_KERNEL); + sizeof(struct e1000_buffer), GFP_KERNEL); if (!rx_ring->buffer_info) { ret_val = 5; goto err_nomem; @@ -1220,16 +1222,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); - ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF)); - ew32(RDBAH(0), ((u64) rx_ring->dma >> 32)); + ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); ew32(RDLEN(0), rx_ring->size); ew32(RDH(0), 0); ew32(RDT(0), 0); rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | - E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | - E1000_RCTL_SBP | E1000_RCTL_SECRC | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | + E1000_RCTL_SBP | E1000_RCTL_SECRC | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); ew32(RCTL, rctl); for (i = 0; i < rx_ring->count; i++) { @@ -1244,8 +1246,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) skb_reserve(skb, NET_IP_ALIGN); rx_ring->buffer_info[i].skb = skb; rx_ring->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, 2048, - DMA_FROM_DEVICE); + dma_map_single(&pdev->dev, skb->data, 2048, + DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, rx_ring->buffer_info[i].dma)) { ret_val = 8; @@ -1296,7 +1298,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) ew32(CTRL, ctrl_reg); e1e_flush(); - udelay(500); + usleep_range(500, 1000); return 0; } @@ -1322,7 +1324,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) e1e_wphy(hw, PHY_REG(2, 21), phy_reg); /* Assert SW reset for above settings to take effect */ hw->phy.ops.commit(hw); - mdelay(1); + usleep_range(1000, 2000); /* Force Full Duplex */ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); @@ -1363,7 +1365,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) /* force 1000, set loopback */ e1e_wphy(hw, MII_BMCR, 
0x4140); - mdelay(250); + msleep(250); /* Now set up the MAC to the same speed/duplex as the PHY. */ ctrl_reg = er32(CTRL); @@ -1395,7 +1397,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) if (hw->phy.type == e1000_phy_m88) e1000_phy_disable_receiver(adapter); - udelay(500); + usleep_range(500, 1000); return 0; } @@ -1431,8 +1433,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) /* special write to serdes control register to enable SerDes analog * loopback */ -#define E1000_SERDES_LB_ON 0x410 - ew32(SCTL, E1000_SERDES_LB_ON); + ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); e1e_flush(); usleep_range(10000, 20000); @@ -1526,8 +1527,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) case e1000_82572: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { -#define E1000_SERDES_LB_OFF 0x400 - ew32(SCTL, E1000_SERDES_LB_OFF); + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); e1e_flush(); usleep_range(10000, 20000); break; @@ -1564,7 +1564,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb, frame_size &= ~1; if (*(skb->data + 3) == 0xFF) if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) + (*(skb->data + frame_size / 2 + 12) == 0xAF)) return 0; return 13; } @@ -1575,6 +1575,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) struct e1000_ring *rx_ring = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; int i, j, k, l; int lc; int good_cnt; @@ -1595,14 +1596,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) k = 0; l = 0; - for (j = 0; j <= lc; j++) { /* loop count loop */ - for (i = 0; i < 64; i++) { /* send the packets */ - e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, - 1024); + /* loop count loop */ + for (j = 0; j <= lc; j++) { + /* send the packets */ + for (i = 0; i < 64; i++) { + buffer_info = &tx_ring->buffer_info[k]; + + e1000_create_lbtest_frame(buffer_info->skb, 1024); dma_sync_single_for_device(&pdev->dev, - tx_ring->buffer_info[k].dma, - tx_ring->buffer_info[k].length, - DMA_TO_DEVICE); + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); k++; if (k == tx_ring->count) k = 0; @@ -1612,13 +1616,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) msleep(200); time = jiffies; /* set the start time for the receive */ good_cnt = 0; - do { /* receive the sent packets */ + /* receive the sent packets */ + do { + buffer_info = &rx_ring->buffer_info[l]; + dma_sync_single_for_cpu(&pdev->dev, - rx_ring->buffer_info[l].dma, 2048, - DMA_FROM_DEVICE); + buffer_info->dma, 2048, + DMA_FROM_DEVICE); - ret_val = e1000_check_lbtest_frame( - rx_ring->buffer_info[l].skb, 1024); + ret_val = e1000_check_lbtest_frame(buffer_info->skb, + 1024); if (!ret_val) good_cnt++; l++; @@ -1637,7 +1644,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ret_val = 14; /* error code for time out error */ break; } - } /* end loop count loop */ + } return ret_val; } @@ -1696,7 +1703,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) /* On some Phy/switch combinations, link establishment * can take a few seconds more than expected. 
*/ - msleep(5000); + msleep_interruptible(5000); if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; @@ -1980,12 +1987,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: - p = (char *) &net_stats + - e1000_gstrings_stats[i].stat_offset; + p = (char *)&net_stats + + e1000_gstrings_stats[i].stat_offset; break; case E1000_STATS: - p = (char *) adapter + - e1000_gstrings_stats[i].stat_offset; + p = (char *)adapter + + e1000_gstrings_stats[i].stat_offset; break; default: data[i] = 0; @@ -1993,7 +2000,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, } data[i] = (e1000_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 1e6b889aee87..84850f7a23e4 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -167,7 +167,7 @@ enum e1000_1000t_rx_status { e1000_1000t_rx_status_undefined = 0xFF }; -enum e1000_rev_polarity{ +enum e1000_rev_polarity { e1000_rev_polarity_normal = 0, e1000_rev_polarity_reversed, e1000_rev_polarity_undefined = 0xFF @@ -545,7 +545,7 @@ struct e1000_mac_info { u16 mta_reg_count; /* Maximum size of the MTA register table in all supported adapters */ - #define MAX_MTA_REG 128 +#define MAX_MTA_REG 128 u32 mta_shadow[MAX_MTA_REG]; u16 rar_entry_count; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 121a865c7fbd..1cdec5fd2129 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -61,15 +61,15 @@ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { struct ich8_hsfsts { - u16 flcdone :1; /* bit 0 Flash Cycle Done */ - u16 flcerr :1; /* bit 1 Flash Cycle Error */ - u16 dael :1; /* bit 2 Direct Access error Log */ - u16 berasesz :2; /* bit 4:3 Sector Erase Size */ - u16 flcinprog :1; /* bit 5 flash cycle in Progress */ - u16 reserved1 :2; /* bit 13:6 Reserved */ - u16 reserved2 :6; /* bit 13:6 Reserved */ - u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ - u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ + u16 flcdone:1; /* bit 0 Flash Cycle Done */ + u16 flcerr:1; /* bit 1 Flash Cycle Error */ + u16 dael:1; /* bit 2 Direct Access error Log */ + u16 berasesz:2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog:1; /* bit 5 flash cycle in Progress */ + u16 reserved1:2; /* bit 13:6 Reserved */ + u16 reserved2:6; /* bit 13:6 Reserved */ + u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ } hsf_status; u16 regval; }; @@ -78,11 +78,11 @@ union ich8_hws_flash_status { /* Offset 06h FLCTL */ union ich8_hws_flash_ctrl { struct ich8_hsflctl { - u16 flcgo :1; /* 0 Flash Cycle Go */ - u16 flcycle :2; /* 2:1 Flash Cycle */ - u16 reserved :5; /* 7:3 Reserved */ - u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ - u16 flockdn :6; /* 15:10 Reserved */ + u16 flcgo:1; /* 0 Flash Cycle Go */ + u16 flcycle:2; /* 2:1 Flash Cycle */ + u16 reserved:5; /* 7:3 Reserved */ + u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ + u16 flockdn:6; /* 15:10 Reserved */ } hsf_ctrl; u16 regval; }; @@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl { /* ICH Flash Region Access Permissions */ union ich8_hws_flash_regacc { struct ich8_flracc { - u32 grra :8; /* 0:7 GbE region Read Access */ - u32 grwa :8; /* 
8:15 GbE region Write Access */ - u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ - u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ + u32 grra:8; /* 0:7 GbE region Read Access */ + u32 grwa:8; /* 8:15 GbE region Write Access */ + u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ } hsf_flregacc; u16 regval; }; @@ -312,7 +312,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; ew32(CTRL, mac_reg); e1e_flush(); - udelay(10); + usleep_range(10, 20); mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; ew32(CTRL, mac_reg); e1e_flush(); @@ -548,8 +548,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) /* find total size of the NVM, then cut in half since the total * size represents two separate NVM banks. */ - nvm->flash_bank_size = (sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); nvm->flash_bank_size /= 2; /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); @@ -1134,9 +1134,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) u32 fwsm; fwsm = er32(FWSM); - return (fwsm & E1000_ICH_FWSM_FW_VALID) && - ((fwsm & E1000_FWSM_MODE_MASK) == - (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); + return ((fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))); } /** @@ -1153,7 +1153,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) fwsm = er32(FWSM); return (fwsm & E1000_ICH_FWSM_FW_VALID) && - (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** @@ -1440,8 +1440,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) word_addr = (u16)(cnf_base_addr << 1); for (i = 0; i < cnf_size; i++) { - ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, - &reg_data); + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data); if (ret_val) goto release; @@ -1501,13 +1500,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) if (ret_val) goto release; - status_reg &= BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); if (status_reg == (BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_1000)) + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) k1_enable = false; } @@ -1516,13 +1515,13 @@ if (ret_val) goto release; - status_reg &= HV_M_STATUS_LINK_UP | - HV_M_STATUS_AUTONEG_COMPLETE | - HV_M_STATUS_SPEED_MASK; + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); if (status_reg == (HV_M_STATUS_LINK_UP | - HV_M_STATUS_AUTONEG_COMPLETE | - HV_M_STATUS_SPEED_1000)) + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) k1_enable = false; } @@ -1579,7 +1578,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) if (ret_val) return ret_val; - udelay(20); + usleep_range(20, 40); ctrl_ext = er32(CTRL_EXT); ctrl_reg = er32(CTRL); @@ -1589,11 +1588,11 @@ ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); e1e_flush(); - udelay(20); + usleep_range(20, 40); ew32(CTRL, ctrl_reg); ew32(CTRL_EXT, ctrl_ext); e1e_flush(); -
udelay(20); + usleep_range(20, 40); return 0; } @@ -1667,7 +1666,6 @@ release: return ret_val; } - /** * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode * @hw: pointer to the HW structure @@ -1834,7 +1832,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) * SHRAL/H) and initial CRC values to the MAC */ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { - u8 mac_addr[ETH_ALEN] = {0}; + u8 mac_addr[ETH_ALEN] = { 0 }; u32 addr_high, addr_low; addr_high = er32(RAH(i)); @@ -1865,8 +1863,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) ew32(RCTL, mac_reg); ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - &data); + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); if (ret_val) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, @@ -1875,8 +1873,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - &data); + E1000_KMRNCTRLSTA_HD_CTRL, + &data); if (ret_val) return ret_val; data &= ~(0xF << 8); @@ -1923,8 +1921,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) ew32(RCTL, mac_reg); ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - &data); + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); if (ret_val) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, @@ -1933,8 +1931,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - &data); + E1000_KMRNCTRLSTA_HD_CTRL, + &data); if (ret_val) return ret_val; data &= ~(0xF << 8); @@ -2100,7 +2098,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) do { data = er32(STATUS); data &= E1000_STATUS_LAN_INIT_DONE; - udelay(100); + usleep_range(100, 200); } while ((!data) && --loop); /* If basic configuration is incomplete before the above loop @@ -2445,7 +2443,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) /* Check bank 0 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, - &sig_byte); + &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == @@ -2456,8 +2454,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) /* Check bank 1 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + - bank1_offset, - &sig_byte); + bank1_offset, + &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == @@ -2510,8 +2508,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, ret_val = 0; for (i = 0; i < words; i++) { - if (dev_spec->shadow_ram[offset+i].modified) { - data[i] = dev_spec->shadow_ram[offset+i].value; + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = dev_spec->shadow_ram[offset + i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, act_offset + i, @@ -2696,8 +2694,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); do { udelay(1); @@ -2714,8 +2712,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ew32flash(ICH_FLASH_FADDR, flash_linear_addr); - ret_val = e1000_flash_cycle_ich8lan(hw, - 
ICH_FLASH_READ_COMMAND_TIMEOUT); + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); /* Check if FCERR is set to 1, if set to 1, clear it * and try the whole sequence a few more times, else @@ -2774,8 +2773,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, nvm->ops.acquire(hw); for (i = 0; i < words; i++) { - dev_spec->shadow_ram[offset+i].modified = true; - dev_spec->shadow_ram[offset+i].value = data[i]; + dev_spec->shadow_ram[offset + i].modified = true; + dev_spec->shadow_ram[offset + i].value = data[i]; } nvm->ops.release(hw); @@ -2844,8 +2843,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) data = dev_spec->shadow_ram[i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, i + - old_bank_offset, - &data); + old_bank_offset, + &data); if (ret_val) break; } @@ -2863,7 +2862,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) /* Convert offset to bytes. */ act_offset = (i + new_bank_offset) << 1; - udelay(100); + usleep_range(100, 200); /* Write the bytes to the new bank. */ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, @@ -2871,10 +2870,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) break; - udelay(100); + usleep_range(100, 200); ret_val = e1000_retry_write_flash_byte_ich8lan(hw, - act_offset + 1, - (u8)(data >> 8)); + act_offset + 1, + (u8)(data >> 8)); if (ret_val) break; } @@ -3050,8 +3049,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); do { udelay(1); @@ -3062,7 +3061,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ - hsflctl.hsf_ctrl.fldbcount = size -1; + hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -3078,8 +3077,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, /* check if FCERR is set to 1 , if set to 1, clear it * and try the whole sequence a few more times else done */ - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_WRITE_COMMAND_TIMEOUT); + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); if (!ret_val) break; @@ -3138,7 +3138,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, for (program_retries = 0; program_retries < 100; program_retries++) { e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); - udelay(100); + usleep_range(100, 200); ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (!ret_val) break; @@ -3209,8 +3209,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) flash_linear_addr = hw->nvm.flash_base_addr; flash_linear_addr += (bank) ? 
flash_bank_size : 0; - for (j = 0; j < iteration ; j++) { + for (j = 0; j < iteration; j++) { do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) @@ -3230,8 +3232,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) flash_linear_addr += (j * sector_size); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_ERASE_COMMAND_TIMEOUT); + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); if (!ret_val) break; @@ -3270,8 +3271,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) return ret_val; } - if (*data == ID_LED_RESERVED_0000 || - *data == ID_LED_RESERVED_FFFF) + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT_ICH8LAN; return 0; @@ -3511,9 +3511,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ /* Setup the receive address. */ e1000e_init_rx_addrs(hw, mac->rar_entry_count); @@ -3541,16 +3541,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy for both queues */ txdctl = er32(TXDCTL(0)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); ew32(TXDCTL(0), txdctl); txdctl = er32(TXDCTL(1)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); ew32(TXDCTL(1), txdctl); /* ICH8 has opposite polarity of no_snoop bits. @@ -3559,7 +3559,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) if (mac->type == e1000_ich8lan) snoop = PCIE_ICH8_SNOOP_ALL; else - snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + snoop = (u32)~(PCIE_NO_SNOOP_ALL); e1000e_set_pcie_no_snoop(hw, snoop); ctrl_ext = er32(CTRL_EXT); @@ -3575,6 +3575,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) return ret_val; } + /** * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits * @hw: pointer to the HW structure @@ -3686,8 +3687,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) */ hw->fc.current_mode = hw->fc.requested_mode; - e_dbg("After fix-ups FlowControl is now = %x\n", - hw->fc.current_mode); + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Continue to configure the copper link. 
*/ ret_val = hw->mac.ops.setup_physical_interface(hw); @@ -3737,12 +3737,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &reg_data); + &reg_data); if (ret_val) return ret_val; reg_data |= 0x3F; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - reg_data); + reg_data); if (ret_val) return ret_val; @@ -3815,8 +3815,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, return ret_val; if ((hw->mac.type == e1000_ich8lan) && - (hw->phy.type == e1000_phy_igp_3) && - (*speed == SPEED_1000)) { + (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); } @@ -3899,7 +3898,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) * /disabled - false). **/ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, - bool state) + bool state) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; @@ -3981,12 +3980,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) return; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - &reg_data); + &reg_data); if (ret_val) return; reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - reg_data); + reg_data); if (ret_val) return; reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b78e02174601..b25cc4300de7 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) * serdes media type. */ /* SYNCH bit and IV bit are sticky. */ - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { @@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) status = er32(STATUS); if (status & E1000_STATUS_LU) { /* SYNCH bit and IV bit are sticky, so reread rxcw.
*/ - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { @@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) if (!(swsm & E1000_SWSM_SMBI)) break; - udelay(50); + usleep_range(50, 100); i++; } @@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) if (er32(SWSM) & E1000_SWSM_SWESMBI) break; - udelay(50); + usleep_range(50, 100); } if (i == timeout) { @@ -1712,7 +1712,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) while (timeout) { if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) break; - udelay(100); + usleep_range(100, 200); timeout--; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 948b86ffa4f0..142ca39a68f6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", - netdev->name, netdev->state, netdev->trans_start, - netdev->last_rx); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); } /* Print Registers */ @@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, cpu_to_le64(ps_page->dma); } - skb = __netdev_alloc_skb_ip_align(netdev, - adapter->rx_ps_bsize0, + skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, gfp); if (!skb) { @@ -850,8 +848,8 @@ check_page: if (!buffer_info->dma) buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, - PAGE_SIZE, + buffer_info->page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); @@ -937,10 +935,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, cleaned = true; cleaned_count++; - dma_unmap_single(&pdev->dev, - buffer_info->dma, - adapter->rx_buffer_len, - DMA_FROM_DEVICE); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->wb.upper.length); @@ -1068,8 +1064,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring, static void e1000_print_hw_hang(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, - print_hang_task); + struct e1000_adapter, + print_hang_task); struct net_device *netdev = adapter->netdev; struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int i = tx_ring->next_to_clean; @@ -1082,8 +1078,7 @@ static void e1000_print_hw_hang(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) return; - if (!adapter->tx_hang_recheck && - (adapter->flags2 & FLAG2_DMA_BURST)) { + if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { /* May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory */ @@ -1125,19 +1120,10 @@ static void e1000_print_hw_hang(struct work_struct *work) "PHY 1000BASE-T Status <%x>\n" "PHY Extended Status <%x>\n" "PCI Status <%x>\n", - readl(tx_ring->head), - readl(tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->upper.fields.status, - er32(STATUS), - phy_status, - phy_1000t_status, - phy_ext_status, - pci_status); + readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, + 
tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), + phy_status, phy_1000t_status, phy_ext_status, pci_status); /* Suggest workaround for known h/w issue */ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) @@ -1430,7 +1416,7 @@ copydone: e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); if (rx_desc->wb.upper.header_status & - cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) adapter->rx_hdr_split++; e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1468,7 +1454,7 @@ next_desc: * e1000_consume_page - helper function **/ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, - u16 length) + u16 length) { bi->page = NULL; skb->len += length; @@ -1495,7 +1481,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct skb_shared_info *shinfo; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); @@ -1541,7 +1528,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, rx_ring->rx_skb_top = NULL; goto next_desc; } - #define rxtop (rx_ring->rx_skb_top) if (!(staterr & E1000_RXD_STAT_EOP)) { /* this descriptor is only the beginning (or middle) */ @@ -1549,12 +1535,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, /* this is the beginning of a chain */ rxtop = skb; skb_fill_page_desc(rxtop, 0, buffer_info->page, - 0, length); + 0, length); } else { /* this is the middle of a chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); /* re-use the skb, only consumed the page */ buffer_info->skb = skb; } @@ -1563,9 +1550,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, } else { if (rxtop) { /* end of the chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); /* re-use the current skb, we only consumed the * page */ @@ -1590,10 +1578,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, length); } else { skb_fill_page_desc(skb, 0, - buffer_info->page, 0, - length); + buffer_info->page, 0, + length); e1000_consume_page(buffer_info, skb, - length); + length); } } } @@ -1666,8 +1654,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) dma_unmap_page(&pdev->dev, buffer_info->dma, - PAGE_SIZE, - DMA_FROM_DEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_rx_irq_ps) dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_bsize0, @@ -1720,7 +1707,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) static void e1000e_downshift_workaround(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, downshift_task); + struct e1000_adapter, + downshift_task); if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -1913,7 +1901,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) 
struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; - adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -1970,7 +1957,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) ew32(RFCTL, rfctl); } -#define E1000_IVAR_INT_ALLOC_VALID 0x8 /* Configure Rx vector */ rx_ring->ims_val = E1000_IMS_RXQ0; adapter->eiac_mask |= rx_ring->ims_val; @@ -2045,8 +2031,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) if (adapter->flags & FLAG_HAS_MSIX) { adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ adapter->msix_entries = kcalloc(adapter->num_vectors, - sizeof(struct msix_entry), - GFP_KERNEL); + sizeof(struct + msix_entry), + GFP_KERNEL); if (adapter->msix_entries) { for (i = 0; i < adapter->num_vectors; i++) adapter->msix_entries[i].entry = i; @@ -2490,7 +2477,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) switch (itr_setting) { case lowest_latency: /* handle TSO and jumbo frames */ - if (bytes/packets > 8000) + if (bytes / packets > 8000) retval = bulk_latency; else if ((packets < 5) && (bytes > 512)) retval = low_latency; @@ -2498,13 +2485,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ - if (bytes/packets > 8000) + if (bytes / packets > 8000) retval = bulk_latency; - else if ((packets < 10) || ((bytes/packets) > 1200)) + else if ((packets < 10) || ((bytes / packets) > 1200)) retval = bulk_latency; else if ((packets > 35)) retval = lowest_latency; - } else if (bytes/packets > 2000) { + } else if (bytes / packets > 2000) { retval = bulk_latency; } else if (packets <= 2 && bytes < 512) { retval = lowest_latency; @@ -2556,8 +2543,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter) current_itr = max(adapter->rx_itr, adapter->tx_itr); - switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ + switch (current_itr) { case lowest_latency: new_itr = 70000; break; @@ -2578,8 +2565,7 @@ set_itr_now: * increasing */ new_itr = new_itr > adapter->itr ? 
- min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; + min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; adapter->itr = new_itr; adapter->rx_ring->itr_val = new_itr; if (adapter->msix_entries) @@ -2810,8 +2796,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter) u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { e1000_vlan_rx_add_vid(netdev, vid); adapter->mng_vlan_id = vid; } @@ -2827,7 +2812,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter) e1000_vlan_rx_add_vid(adapter->netdev, 0); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - e1000_vlan_rx_add_vid(adapter->netdev, vid); + e1000_vlan_rx_add_vid(adapter->netdev, vid); } static void e1000_init_manageability_pt(struct e1000_adapter *adapter) @@ -3002,8 +2987,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) rctl = er32(RCTL); rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Do not Store bad packets */ rctl &= ~E1000_RCTL_SBP; @@ -3089,19 +3074,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* Enable Packet split descriptors */ rctl |= E1000_RCTL_DTYP_PS; - psrctl |= adapter->rx_ps_bsize0 >> - E1000_PSRCTL_BSIZE0_SHIFT; + psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; switch (adapter->rx_ps_pages) { case 3: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE3_SHIFT; + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; + /* fall-through */ case 2: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE2_SHIFT; + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; + /* fall-through */ case 1: - psrctl |= PAGE_SIZE >> - E1000_PSRCTL_BSIZE1_SHIFT; + psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; break; } @@ -3275,7 +3258,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev) /* update_mc_addr_list expects a packed array of only addresses. 
*/ i = 0; netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); hw->mac.ops.update_mc_addr_list(hw, mta_list, i); kfree(mta_list); @@ -3752,8 +3735,7 @@ void e1000e_reset(struct e1000_adapter *adapter) * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; + sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ @@ -3856,13 +3838,13 @@ void e1000e_reset(struct e1000_adapter *adapter) if ((adapter->max_frame_size * 2) > (pba << 10)) { if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned off\n"); + "Interrupt Throttle Rate off\n"); adapter->flags2 |= FLAG2_DISABLE_AIM; e1000e_write_itr(adapter, 0); } } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned on\n"); + "Interrupt Throttle Rate on\n"); adapter->flags2 &= ~FLAG2_DISABLE_AIM; adapter->itr = 20000; e1000e_write_itr(adapter, adapter->itr); @@ -4261,8 +4243,7 @@ static int e1000_open(struct net_device *netdev) e1000e_power_up_phy(adapter); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ @@ -4365,8 +4346,7 @@ static int e1000_close(struct net_device *netdev) /* kill manageability vlan ID if supported, but not if a vlan with * the same ID is registered on the host OS (let 8021q kill it) */ - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); /* If AMT is enabled, let the firmware know that the network @@ -4382,6 +4362,7 @@ static int e1000_close(struct net_device *netdev) return 0; } + /** * e1000_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -4432,7 +4413,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p) static void e1000e_update_phy_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, update_phy_task); + struct e1000_adapter, + update_phy_task); if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -4449,7 +4431,7 @@ static void e1000e_update_phy_task(struct work_struct *work) **/ static void e1000_update_phy_info(unsigned long data) { - struct e1000_adapter *adapter = (struct e1000_adapter *) data; + struct e1000_adapter *adapter = (struct e1000_adapter *)data; if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -4616,18 +4598,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) * our own version based on RUC and ROC */ netdev->stats.rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; netdev->stats.rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; + adapter->stats.roc; netdev->stats.rx_crc_errors = adapter->stats.crcerrs; netdev->stats.rx_frame_errors = 
adapter->stats.algnerrc; netdev->stats.rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - netdev->stats.tx_errors = adapter->stats.ecol + - adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; netdev->stats.tx_aborted_errors = adapter->stats.ecol; netdev->stats.tx_window_errors = adapter->stats.latecol; netdev->stats.tx_carrier_errors = adapter->stats.tncrs; @@ -4785,7 +4765,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) **/ static void e1000_watchdog(unsigned long data) { - struct e1000_adapter *adapter = (struct e1000_adapter *) data; + struct e1000_adapter *adapter = (struct e1000_adapter *)data; /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -4796,7 +4776,8 @@ static void e1000_watchdog(unsigned long data) static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, watchdog_task); + struct e1000_adapter, + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; @@ -4830,8 +4811,8 @@ static void e1000_watchdog_task(struct work_struct *work) /* update snapshot of PHY registers on LSC */ e1000_phy_read_status(adapter); mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); e1000_print_link_info(adapter); /* check if SmartSpeed worked */ @@ -4944,7 +4925,7 @@ static void e1000_watchdog_task(struct work_struct *work) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, - LINK_TIMEOUT); + LINK_TIMEOUT); } } @@ -4979,8 +4960,8 @@ link_up: */ u32 goc = (adapter->gotc + adapter->gorc) / 10000; u32 dif = (adapter->gotc > adapter->gorc ? - adapter->gotc - adapter->gorc : - adapter->gorc - adapter->gotc) / 10000; + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; u32 itr = goc > 0 ? 
(dif * 6000 / goc + 2000) : 8000; e1000e_write_itr(adapter, itr); @@ -5059,14 +5040,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); + 0, IPPROTO_TCP, 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); ipcse = 0; } ipcss = skb_network_offset(skb); @@ -5075,7 +5056,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | - E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); i = tx_ring->next_to_use; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); @@ -5145,8 +5126,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) context_desc->lower_setup.ip_config = 0; context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = - css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(cmd_len); @@ -5219,7 +5199,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, - offset, size, DMA_TO_DEVICE); + offset, size, + DMA_TO_DEVICE); buffer_info->mapped_as_page = true; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; @@ -5268,7 +5249,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) if (tx_flags & E1000_TX_FLAGS_TSO) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | - E1000_TXD_CMD_TSE; + E1000_TXD_CMD_TSE; txd_upper |= E1000_TXD_POPTS_TXSM << 8; if (tx_flags & E1000_TX_FLAGS_IPV4) @@ -5299,8 +5280,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) buffer_info = &tx_ring->buffer_info[i]; tx_desc = E1000_TX_DESC(*tx_ring, i); tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->lower.data = - cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->lower.data = cpu_to_le32(txd_lower | + buffer_info->length); tx_desc->upper.data = cpu_to_le32(txd_upper); i++; @@ -5350,11 +5331,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) return 0; - if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) return 0; { - const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); struct udphdr *udp; if (ip->protocol != IPPROTO_UDP) @@ -5579,7 +5560,7 @@ static void e1000_reset_task(struct work_struct *work) * Returns the address of the device statistics structure. 
**/ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + struct rtnl_link_stats64 *stats) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5600,18 +5581,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, * our own version based on RUC and ROC */ stats->rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; stats->rx_crc_errors = adapter->stats.crcerrs; stats->rx_frame_errors = adapter->stats.algnerrc; stats->rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - stats->tx_errors = adapter->stats.ecol + - adapter->stats.latecol; + stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; stats->tx_aborted_errors = adapter->stats.ecol; stats->tx_window_errors = adapter->stats.latecol; stats->tx_carrier_errors = adapter->stats.tncrs; @@ -5680,9 +5658,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN - + ETH_FCS_LEN; + + ETH_FCS_LEN; if (netif_running(netdev)) e1000e_up(adapter); @@ -5861,7 +5839,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) phy_reg &= ~(BM_RCTL_MO_MASK); if (mac_reg & E1000_RCTL_MO_3) phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) - << BM_RCTL_MO_SHIFT); + << BM_RCTL_MO_SHIFT); if (mac_reg & E1000_RCTL_BAM) phy_reg |= BM_RCTL_BAM; if (mac_reg & E1000_RCTL_PMCF) @@ -5930,10 +5908,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) } ctrl = er32(CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; @@ -5977,8 +5951,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) */ e1000e_release_hw_control(adapter); - pci_clear_master(pdev); - /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. To * prevent this we need to mask off the correctable errors on the @@ -6077,24 +6049,24 @@ static int __e1000_resume(struct pci_dev *pdev) e1e_rphy(&adapter->hw, BM_WUS, &phy_data); if (phy_data) { e_info("PHY Wakeup cause - %s\n", - phy_data & E1000_WUS_EX ? "Unicast Packet" : - phy_data & E1000_WUS_MC ? "Multicast Packet" : - phy_data & E1000_WUS_BC ? "Broadcast Packet" : - phy_data & E1000_WUS_MAG ? "Magic Packet" : - phy_data & E1000_WUS_LNKC ? - "Link Status Change" : "other"); + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? "Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? + "Link Status Change" : "other"); } e1e_wphy(&adapter->hw, BM_WUS, ~0); } else { u32 wus = er32(WUS); if (wus) { e_info("MAC Wakeup cause - %s\n", - wus & E1000_WUS_EX ? 
"Unicast Packet" : - wus & E1000_WUS_MC ? "Multicast Packet" : - wus & E1000_WUS_BC ? "Broadcast Packet" : - wus & E1000_WUS_MAG ? "Magic Packet" : - wus & E1000_WUS_LNKC ? "Link Status Change" : - "other"); + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? "Link Status Change" : + "other"); } ew32(WUS, ~0); } @@ -6369,7 +6341,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) e_info("(PCI Express:2.5GT/s:%s) %pM\n", /* bus width */ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : - "Width x1"), + "Width x1"), /* MAC address */ netdev->dev_addr); e_info("Intel(R) PRO/%s Network Connection\n", @@ -6479,7 +6451,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) resource_size_t flash_start, flash_len; static int cards_found; u16 aspm_disable_flag = 0; - int i, err, pci_using_dac; + int bars, i, err, pci_using_dac; u16 eeprom_data = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; @@ -6506,15 +6478,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_dma; } } } - err = pci_request_selected_regions_exclusive(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - e1000e_driver_name); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions_exclusive(pdev, bars, + e1000e_driver_name); if (err) goto err_pci_reg; @@ -6683,11 +6656,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = e1000_watchdog; - adapter->watchdog_timer.data = (unsigned long) adapter; + adapter->watchdog_timer.data = (unsigned long)adapter; init_timer(&adapter->phy_info_timer); adapter->phy_info_timer.function = e1000_update_phy_info; - adapter->phy_info_timer.data = (unsigned long) adapter; + adapter->phy_info_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); @@ -6795,7 +6768,7 @@ err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -6865,7 +6838,7 @@ static void e1000_remove(struct pci_dev *pdev) if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_select_bars(pdev, IORESOURCE_MEM)); free_netdev(netdev); @@ -6886,7 +6859,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), + board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, @@ -6962,8 +6936,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); #ifdef CONFIG_PM 
static const struct dev_pm_ops e1000_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) - SET_RUNTIME_PM_OPS(e1000_runtime_suspend, - e1000_runtime_resume, e1000_idle) + SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, + e1000_idle) }; #endif diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 84fecc268162..44ddc0a0ee0e 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw) { u32 ctrl_ext; - udelay(10); + usleep_range(10, 20); ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_EE_RST; ew32(CTRL_EXT, ctrl_ext); diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 98da75dff936..c16bd75b6caa 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -45,7 +45,7 @@ unsigned int copybreak = COPYBREAK_DEFAULT; module_param(copybreak, uint, 0644); MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); + "Maximum size of packet that is copied to a new buffer on receive"); /* All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code @@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); * * Default Value: 1 (enabled) */ -E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); +E1000_PARAM(WriteProtectNVM, + "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); /* Enable CRC Stripping * @@ -160,13 +161,18 @@ struct e1000_option { const char *err; int def; union { - struct { /* range_option info */ + /* range_option info */ + struct { int min; int max; } r; - struct { /* list_option info */ + /* list_option info */ + struct { int nr; - struct e1000_opt_list { int i; char *str; } *p; + struct e1000_opt_list { + int i; + char *str; + } *p; } l; } arg; }; @@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) "Using defaults for all values\n"); } - { /* Transmit Interrupt Delay */ + /* Transmit Interrupt Delay */ + { static const struct e1000_option opt = { .type = range_option, .name = "Transmit Interrupt Delay", @@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->tx_int_delay = opt.def; } } - { /* Transmit Absolute Interrupt Delay */ + /* Transmit Absolute Interrupt Delay */ + { static const struct e1000_option opt = { .type = range_option, .name = "Transmit Absolute Interrupt Delay", @@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->tx_abs_int_delay = opt.def; } } - { /* Receive Interrupt Delay */ + /* Receive Interrupt Delay */ + { static struct e1000_option opt = { .type = range_option, .name = "Receive Interrupt Delay", @@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->rx_int_delay = opt.def; } } - { /* Receive Absolute Interrupt Delay */ + /* Receive Absolute Interrupt Delay */ + { static const struct e1000_option opt = { .type = range_option, .name = "Receive Absolute Interrupt Delay", @@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->rx_abs_int_delay = opt.def; } } - { /* Interrupt Throttling Rate */ + /* Interrupt Throttling Rate */ + { static const struct e1000_option opt = { .type = range_option, .name = "Interrupt 
Throttling Rate (ints/sec)", @@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) break; } } - { /* Interrupt Mode */ + /* Interrupt Mode */ + { static struct e1000_option opt = { .type = range_option, .name = "Interrupt Mode", @@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) kfree(opt.err); #endif } - { /* Smart Power Down */ + /* Smart Power Down */ + { static const struct e1000_option opt = { .type = enable_option, .name = "PHY Smart Power Down", @@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->flags |= FLAG_SMART_POWER_DOWN; } } - { /* CRC Stripping */ + /* CRC Stripping */ + { static const struct e1000_option opt = { .type = enable_option, .name = "CRC Stripping", @@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; } } - { /* Kumeran Lock Loss Workaround */ + /* Kumeran Lock Loss Workaround */ + { static const struct e1000_option opt = { .type = enable_option, .name = "Kumeran Lock Loss Workaround", .err = "defaulting to Enabled", .def = OPTION_ENABLED }; + bool enabled = opt.def; if (num_KumeranLockLoss > bd) { unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; e1000_validate_option(&kmrn_lock_loss, &opt, adapter); - if (hw->mac.type == e1000_ich8lan) - e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, - kmrn_lock_loss); - } else { - if (hw->mac.type == e1000_ich8lan) - e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, - opt.def); + enabled = kmrn_lock_loss; } + + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + enabled); } - { /* Write-protect NVM */ + /* Write-protect NVM */ + { static const struct e1000_option opt = { .type = enable_option, .name = "Write-protect NVM", @@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) if (adapter->flags & FLAG_IS_ICH) { if (num_WriteProtectNVM > bd) { - unsigned int write_protect_nvm = WriteProtectNVM[bd]; + unsigned int write_protect_nvm = + WriteProtectNVM[bd]; e1000_validate_option(&write_protect_nvm, &opt, adapter); if (write_protect_nvm) diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 0930c136aa31..60dbf022e986 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { - 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED +}; + #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_m88_cable_length_table) @@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = { 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, - 124}; + 124 +}; + #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_igp_2_cable_length_table) @@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) manc = er32(MANC); - return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? - E1000_BLK_PHY_RESET : 0; + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 
E1000_BLK_PHY_RESET : 0; } /** @@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) return ret_val; phy->id = (u32)(phy_id << 16); - udelay(20); + usleep_range(20, 40); ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; @@ -162,7 +165,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); + usleep_range(50, 100); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; @@ -175,13 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } - *data = (u16) mdic; + *data = (u16)mdic; /* Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - udelay(100); + usleep_range(100, 200); return 0; } @@ -220,7 +223,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - udelay(50); + usleep_range(50, 100); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; @@ -238,7 +241,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - udelay(100); + usleep_range(100, 200); return 0; } @@ -324,7 +327,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) * semaphores before exiting. **/ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) + bool locked) { s32 ret_val = 0; @@ -391,7 +394,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) * at the offset. Release any acquired semaphores before exiting. **/ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) + bool locked) { s32 ret_val = 0; @@ -410,8 +413,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, (u16)offset); if (!ret_val) ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & - offset, - data); + offset, data); if (!locked) hw->phy.ops.release(hw); @@ -458,7 +460,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) * Release any acquired semaphores before exiting. **/ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) + bool locked) { u32 kmrnctrlsta; @@ -531,7 +533,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) * before exiting. **/ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) + bool locked) { u32 kmrnctrlsta; @@ -772,8 +774,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) phy_data |= M88E1000_EPSCR_TX_CLK_25; - if ((phy->revision == 2) && - (phy->id == M88E1111_I_PHY_ID)) { + if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { /* 82573L PHY - set the downshift counter to 5x. 
*/ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; @@ -1296,7 +1297,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; @@ -1319,7 +1320,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; } @@ -1609,9 +1610,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw) ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) - phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -1653,9 +1654,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) ret_val = e1e_rphy(hw, offset, &data); if (!ret_val) - phy->cable_polarity = (data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -1685,9 +1686,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw) ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) - phy->cable_polarity = (phy_data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -1733,7 +1734,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) * Polls the PHY status register for link, 'iterations' number of times. **/ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success) + u32 usec_interval, bool *success) { s32 ret_val = 0; u16 i, phy_status; @@ -1756,7 +1757,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, if (phy_status & BMSR_LSTATUS) break; if (usec_interval >= 1000) - mdelay(usec_interval/1000); + mdelay(usec_interval / 1000); else udelay(usec_interval); } @@ -1791,8 +1792,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; @@ -1824,10 +1825,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { - IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D }; /* Read the AGC registers for all channels */ @@ -1841,8 +1842,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) * that can be put into the lookup table to obtain the * approximate cable length. */ - cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & - IGP02E1000_AGC_LENGTH_MASK; + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); /* Array index bound check. 
*/ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || @@ -1865,8 +1866,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); /* Calculate cable length with the error range of +/- 10 meters. */ - phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? - (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; @@ -2040,9 +2041,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) return ret_val; } else { /* Polarity is forced */ - phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); } ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); @@ -2119,7 +2120,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) ew32(CTRL, ctrl); e1e_flush(); - udelay(150); + usleep_range(150, 300); phy->ops.release(hw); @@ -2375,13 +2376,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); + (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + data); release: hw->phy.ops.release(hw); @@ -2433,13 +2434,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); + (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + data); release: hw->phy.ops.release(hw); return ret_val; @@ -2674,7 +2675,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, if (read) { /* Read the Wakeup register page value using opcode 0x12 */ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, - data); + data); } else { /* Write the Wakeup register page value using opcode 0x12 */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, @@ -2763,7 +2764,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, - data, true); + data, true); goto out; } @@ -2786,8 +2787,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, page << IGP_PAGE_SHIFT, reg); - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, - data); + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); out: if (!locked) hw->phy.ops.release(hw); @@ -2871,7 +2871,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, - &data, false); + &data, false); goto out; } @@ -2910,7 +2910,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, page << IGP_PAGE_SHIFT, reg); ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, - data); + data); out: if (!locked) @@ -2988,15 +2988,15 @@ static u32 
e1000_get_phy_addr_for_hv_page(u32 page) * These accesses done with PHY address 2 and without using pages. **/ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, - u16 *data, bool read) + u16 *data, bool read) { s32 ret_val; u32 addr_reg; u32 data_reg; /* This takes care of the difference with desktop vs mobile phy */ - addr_reg = (hw->phy.type == e1000_phy_82578) ? - I82578_ADDR_REG : I82577_ADDR_REG; + addr_reg = ((hw->phy.type == e1000_phy_82578) ? + I82578_ADDR_REG : I82577_ADDR_REG); data_reg = addr_reg + 1; /* All operations in this function are phy address 2 */ @@ -3050,8 +3050,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) if (ret_val) return ret_val; - data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) @@ -3086,9 +3086,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) - phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -3215,8 +3215,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) if (ret_val) return ret_val; - length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> - I82577_DSTATUS_CABLE_LENGTH_SHIFT; + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); if (length == E1000_CABLE_LENGTH_UNDEFINED) return -E1000_ERR_PHY; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407e..e56a3d169e30 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7007,7 +7007,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], int err; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return -EOPNOTSUPP; + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); /* Hardware does not support aging addresses so if a * ndm_state is given only allow permanent addresses @@ -7038,44 +7038,6 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err = -EOPNOTSUPP; - - if (ndm->ndm_state & NUD_PERMANENT) { - pr_info("%s: FDB only supports static addresses\n", - ixgbe_driver_name); - return -EINVAL; - } - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - if (is_unicast_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; - } - - return err; -} - -static int ixgbe_ndo_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, - int idx) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); - - return idx; -} - static int ixgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) { @@ -7171,8 +7133,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_features = ixgbe_set_features, .ndo_fix_features = ixgbe_fix_features, 
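Editor's note: the ixgbe hunk above (and the mlx4_en and qlcnic hunks further down) stop open-coding FDB handlers and instead fall back to the core ndo_dflt_fdb_*() helpers whenever the hardware offload path does not apply. A hedged sketch of that delegation pattern follows; my_priv and its hw_fdb_offload flag are hypothetical stand-ins, not driver fields.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netlink.h>
#include <linux/neighbour.h>

struct my_priv {
	bool hw_fdb_offload;	/* hypothetical: true when SR-IOV/e-switch is on */
};

static int my_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			  struct net_device *dev,
			  const unsigned char *addr, u16 flags)
{
	struct my_priv *priv = netdev_priv(dev);
	int err;

	/* no hardware filtering active: let the networking core handle it */
	if (!priv->hw_fdb_offload)
		return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);

	/* the hardware cannot age entries, so only accept static ones */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT))
		return -EINVAL;

	if (is_unicast_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* only report duplicates when the caller asked for NLM_F_EXCL */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}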
.ndo_fdb_add = ixgbe_ndo_fdb_add, - .ndo_fdb_del = ixgbe_ndo_fdb_del, - .ndo_fdb_dump = ixgbe_ndo_fdb_dump, .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index fc0af9a3bb35..fff0d9867529 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer { struct sk_buff *skb; dma_addr_t dma; unsigned long time_stamp; + union ixgbe_adv_tx_desc *next_to_watch; u16 length; - u16 next_to_watch; u16 mapped_as_page; }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c3db6cd69b68..2635b8303515 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static struct pci_device_id ixgbevf_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), - board_82599_vf}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), - board_X540_vf}, - +static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, /* required last entry */ {0, } }; @@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_adapter *adapter = q_vector->adapter; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbevf_tx_buffer *tx_buffer_info; - unsigned int i, eop, count = 0; + unsigned int i, count = 0; unsigned int total_bytes = 0, total_packets = 0; if (test_bit(__IXGBEVF_DOWN, &adapter->state)) return true; i = tx_ring->next_to_clean; - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + eop_desc = tx_buffer_info->next_to_watch; - while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && - (count < tx_ring->count)) { + do { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ - /* eop could change between read and DD-check */ - if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) - goto cont_loop; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer_info->next_to_watch = NULL; + for ( ; !cleaned; count++) { struct sk_buff *skb; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - cleaned = (i == eop); + cleaned = (tx_desc == eop_desc); skb = tx_buffer_info->skb; if (cleaned && skb) { @@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, i++; if (i == tx_ring->count) i = 0; + + tx_buffer_info = &tx_ring->tx_buffer_info[i]; } -cont_loop: - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); - } + eop_desc = tx_buffer_info->next_to_watch; + } while (count < tx_ring->count); tx_ring->next_to_clean = i; @@ -2806,8 +2812,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, } 
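Editor's note: the ixgbevf hunks above turn next_to_watch from a u16 ring index into a pointer to the end-of-packet descriptor, so the cleanup path keys off the descriptor itself: no work is pending until the pointer is set, and it is cleared once the DD bit is observed so a later scan cannot report a false hang. The sketch below is a simplified reconstruction that assumes one descriptor per packet; struct my_ring, struct my_tx_buffer, and my_clean_tx_irq() are illustrative names, while the descriptor union and IXGBE_TXD_STAT_DD are assumed to come from the driver's own headers.

#include <linux/skbuff.h>
#include "ixgbevf.h"	/* union ixgbe_adv_tx_desc, IXGBE_TXD_STAT_DD */

struct my_tx_buffer {
	struct sk_buff *skb;
	union ixgbe_adv_tx_desc *next_to_watch;	/* set last by the xmit path */
};

struct my_ring {
	struct my_tx_buffer *buffer_info;
	unsigned int count;
	unsigned int next_to_clean;
};

static void my_clean_tx_irq(struct my_ring *ring)
{
	unsigned int i = ring->next_to_clean;
	unsigned int budget = ring->count;

	while (budget--) {
		struct my_tx_buffer *buf = &ring->buffer_info[i];
		union ixgbe_adv_tx_desc *eop_desc = buf->next_to_watch;

		/* next_to_watch not set: nothing is pending on this entry */
		if (!eop_desc)
			break;

		/* order the pointer load before reading the descriptor */
		read_barrier_depends();

		/* hardware has not written Descriptor Done yet: stop here */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear the marker so a re-scan does not see stale work */
		buf->next_to_watch = NULL;

		/* one descriptor per packet assumed: release it and move on */
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;

		if (++i == ring->count)
			i = 0;
	}

	ring->next_to_clean = i;
}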
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - unsigned int first) + struct sk_buff *skb, u32 tx_flags) { struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; @@ -2832,7 +2837,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, size, DMA_TO_DEVICE); if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) goto dma_error; - tx_buffer_info->next_to_watch = i; len -= size; total -= size; @@ -2862,7 +2866,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, tx_buffer_info->dma)) goto dma_error; tx_buffer_info->mapped_as_page = true; - tx_buffer_info->next_to_watch = i; len -= size; total -= size; @@ -2881,8 +2884,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, else i = i - 1; tx_ring->tx_buffer_info[i].skb = skb; - tx_ring->tx_buffer_info[first].next_to_watch = i; - tx_ring->tx_buffer_info[first].time_stamp = jiffies; return count; @@ -2891,7 +2892,6 @@ dma_error: /* clear timestamp and dma mappings for failed tx_buffer_info map */ tx_buffer_info->dma = 0; - tx_buffer_info->next_to_watch = 0; count--; /* clear timestamp and dma mappings for remaining portion of packet */ @@ -2908,7 +2908,8 @@ dma_error: } static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, - int count, u32 paylen, u8 hdr_len) + int count, unsigned int first, u32 paylen, + u8 hdr_len) { union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbevf_tx_buffer *tx_buffer_info; @@ -2959,6 +2960,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); + tx_ring->tx_buffer_info[first].time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->tx_buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; } @@ -3050,15 +3061,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_flags |= IXGBE_TX_FLAGS_CSUM; ixgbevf_tx_queue(tx_ring, tx_flags, - ixgbevf_tx_map(tx_ring, skb, tx_flags, first), - skb->len, hdr_len); - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
- */ - wmb(); + ixgbevf_tx_map(tx_ring, skb, tx_flags), + first, skb->len, hdr_len); writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 995d4b6d5c1e..47b996096559 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1931,79 +1931,6 @@ static int mlx4_en_set_features(struct net_device *netdev, } -static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 flags) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - int err; - - if (!mlx4_is_mfunc(mdev)) - return -EOPNOTSUPP; - - /* Hardware does not support aging addresses, allow only - * permanent addresses if ndm_state is given - */ - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - en_info(priv, "Add FDB only supports static addresses\n"); - return -EINVAL; - } - - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_add_excl(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_add_excl(dev, addr); - else - err = -EINVAL; - - /* Only return duplicate errors if NLM_F_EXCL is set */ - if (err == -EEXIST && !(flags & NLM_F_EXCL)) - err = 0; - - return err; -} - -static int mlx4_en_fdb_del(struct ndmsg *ndm, - struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - int err; - - if (!mlx4_is_mfunc(mdev)) - return -EOPNOTSUPP; - - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - en_info(priv, "Del FDB only supports static addresses\n"); - return -EINVAL; - } - - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; - - return err; -} - -static int mlx4_en_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, int idx) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - - if (mlx4_is_mfunc(mdev)) - idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); - - return idx; -} - static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2025,9 +1952,6 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif - .ndo_fdb_add = mlx4_en_fdb_add, - .ndo_fdb_del = mlx4_en_fdb_del, - .ndo_fdb_dump = mlx4_en_fdb_dump, }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 3488c6d9e6b5..2448f0d669e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -58,10 +58,9 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) /* build the pkt before xmit */ skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN); - if (!skb) { - en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n"); + if (!skb) return -ENOMEM; - } + skb_reserve(skb, NET_IP_ALIGN); ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 
46795e403467..1bd419dbda6d 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -424,7 +424,6 @@ static void sonic_rx(struct net_device *dev) /* Malloc up new buffer. */ new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (new_skb == NULL) { - printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); lp->stats.rx_dropped++; break; } diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 63e7af44366f..cb9e63831500 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -152,8 +152,6 @@ static void netx_eth_receive(struct net_device *ndev) skb = netdev_alloc_skb(ndev, len); if (unlikely(skb == NULL)) { - printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", - ndev->name); ndev->stats.rx_dropped++; return; } diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 162da8975b05..539d2028e456 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -737,7 +737,6 @@ static void netdev_rx(struct net_device *dev) data = ether->rdesc->recv_buf[ether->cur_rx]; skb = netdev_alloc_skb(dev, length + 2); if (!skb) { - dev_err(&pdev->dev, "get skb buffer error\n"); ether->stats.rx_dropped++; return; } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 0b8de12bcbca..b62262cfe4d9 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5025,7 +5025,6 @@ static int nv_loopback_test(struct net_device *dev) pkt_len = ETH_DATA_LEN; tx_skb = netdev_alloc_skb(dev, pkt_len); if (!tx_skb) { - netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n"); ret = 0; goto out; } diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 8fd38cb6d26a..91a8fcd6c246 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -312,7 +312,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { - netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); qdev->lrg_buf_skb_check++; } else { /* diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index ba3c72fce1f2..c8b489516008 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 1 -#define _QLCNIC_LINUX_SUBVERSION 35 -#define QLCNIC_LINUX_VERSIONID "5.1.35" +#define _QLCNIC_LINUX_SUBVERSION 36 +#define QLCNIC_LINUX_VERSIONID "5.1.36" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb3..c08fa20dd5f0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -15,36 +15,57 @@ #define RSS_HASHTYPE_IP_TCP 0x3 /* status descriptor mailbox data - * @phy_addr: physical address of buffer + * @phy_addr_{low|high}: physical address of buffer * @sds_ring_size: buffer size * @intrpt_id: interrupt id * @intrpt_val: source of interrupt */ struct qlcnic_sds_mbx { - u64 phy_addr; - u8 rsvd1[16]; + u32 
phy_addr_low; + u32 phy_addr_high; + u32 rsvd1[4]; +#if defined(__LITTLE_ENDIAN) u16 sds_ring_size; - u16 rsvd2[3]; + u16 rsvd2; + u16 rsvd3[2]; u16 intrpt_id; u8 intrpt_val; - u8 rsvd3[5]; + u8 rsvd4; +#elif defined(__BIG_ENDIAN) + u16 rsvd2; + u16 sds_ring_size; + u16 rsvd3[2]; + u8 rsvd4; + u8 intrpt_val; + u16 intrpt_id; +#endif + u32 rsvd5; } __packed; /* receive descriptor buffer data - * phy_addr_reg: physical address of regular buffer - * phy_addr_jmb: physical address of jumbo buffer + * phy_addr_reg_{low|high}: physical address of regular buffer + * phy_addr_jmb_{low|high}: physical address of jumbo buffer * reg_ring_sz: size of regular buffer * reg_ring_len: no. of entries in regular buffer * jmb_ring_len: no. of entries in jumbo buffer * jmb_ring_sz: size of jumbo buffer */ struct qlcnic_rds_mbx { - u64 phy_addr_reg; - u64 phy_addr_jmb; + u32 phy_addr_reg_low; + u32 phy_addr_reg_high; + u32 phy_addr_jmb_low; + u32 phy_addr_jmb_high; +#if defined(__LITTLE_ENDIAN) u16 reg_ring_sz; u16 reg_ring_len; u16 jmb_ring_sz; u16 jmb_ring_len; +#elif defined(__BIG_ENDIAN) + u16 reg_ring_len; + u16 reg_ring_sz; + u16 jmb_ring_len; + u16 jmb_ring_sz; +#endif } __packed; /* host producers for regular and jumbo rings */ @@ -61,6 +82,7 @@ struct __host_producer_mbx { * @phy_port: physical port id */ struct qlcnic_rcv_mbx_out { +#if defined(__LITTLE_ENDIAN) u8 rcv_num; u8 sts_num; u16 ctx_id; @@ -68,32 +90,56 @@ struct qlcnic_rcv_mbx_out { u8 num_pci_func; u8 phy_port; u8 vport_id; +#elif defined(__BIG_ENDIAN) + u16 ctx_id; + u8 sts_num; + u8 rcv_num; + u8 vport_id; + u8 phy_port; + u8 num_pci_func; + u8 state; +#endif u32 host_csmr[QLCNIC_MAX_RING_SETS]; struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; } __packed; struct qlcnic_add_rings_mbx_out { +#if defined(__LITTLE_ENDIAN) u8 rcv_num; u8 sts_num; - u16 ctx_id; + u16 ctx_id; +#elif defined(__BIG_ENDIAN) + u16 ctx_id; + u8 sts_num; + u8 rcv_num; +#endif u32 host_csmr[QLCNIC_MAX_RING_SETS]; struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; } __packed; /* Transmit context mailbox inbox registers - * @phys_addr: DMA address of the transmit buffer - * @cnsmr_index: host consumer index + * @phys_addr_{low|high}: DMA address of the transmit buffer + * @cnsmr_index_{low|high}: host consumer index * @size: legth of transmit buffer ring * @intr_id: interrput id * @src: src of interrupt */ struct qlcnic_tx_mbx { - u64 phys_addr; - u64 cnsmr_index; + u32 phys_addr_low; + u32 phys_addr_high; + u32 cnsmr_index_low; + u32 cnsmr_index_high; +#if defined(__LITTLE_ENDIAN) u16 size; u16 intr_id; u8 src; u8 rsvd[3]; +#elif defined(__BIG_ENDIAN) + u16 intr_id; + u16 size; + u8 rsvd[3]; + u8 src; +#endif } __packed; /* Transmit context mailbox outbox registers @@ -101,11 +147,18 @@ struct qlcnic_tx_mbx { * @ctx_id: transmit context id * @state: state of the transmit context */ + struct qlcnic_tx_mbx_out { u32 host_prod; +#if defined(__LITTLE_ENDIAN) u16 ctx_id; u8 state; u8 rsvd; +#elif defined(__BIG_ENDIAN) + u8 rsvd; + u8 state; + u16 ctx_id; +#endif } __packed; static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { @@ -1004,7 +1057,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); - sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) @@ -1090,7 
+1144,8 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); - sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[i].id; @@ -1110,13 +1165,15 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) rds = &recv_ctx->rds_rings[0]; rds->producer = 0; memset(&rds_mbx, 0, rds_mbx_size); - rds_mbx.phy_addr_reg = rds->phys_addr; + rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr); rds_mbx.reg_ring_sz = rds->dma_size; rds_mbx.reg_ring_len = rds->num_desc; /* Jumbo ring */ rds = &recv_ctx->rds_rings[1]; rds->producer = 0; - rds_mbx.phy_addr_jmb = rds->phys_addr; + rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr); rds_mbx.jmb_ring_sz = rds->dma_size; rds_mbx.jmb_ring_len = rds->num_desc; buf = &cmd.req.arg[index]; @@ -1182,8 +1239,10 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx)); /* setup mailbox inbox registerss */ - mbx.phys_addr = tx->phys_addr; - mbx.cnsmr_index = tx->hw_cons_phys_addr; + mbx.phys_addr_low = LSD(tx->phys_addr); + mbx.phys_addr_high = MSD(tx->phys_addr); + mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr); + mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr); mbx.size = tx->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id; @@ -1373,6 +1432,51 @@ mbx_err: } } +int qlcnic_83xx_set_led(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = -EIO, active = 1; + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(netdev, + "LED test is not supported in non-privileged mode\n"); + return -EOPNOTSUPP; + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) + return -EBUSY; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to set LED blink state\n"); + break; + case ETHTOOL_ID_INACTIVE: + active = 0; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to reset LED blink state\n"); + break; + + default: + return -EINVAL; + } + + if (!active || err) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + return err; +} + void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, int enable) { @@ -1713,7 +1817,12 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, (adapter->recv_ctx->context_id << 16); mv.vlan = le16_to_cpu(vlan_id); - memcpy(&mv.mac, addr, ETH_ALEN); + mv.mac_addr0 = addr[0]; + mv.mac_addr1 = addr[1]; + mv.mac_addr2 = addr[2]; + mv.mac_addr3 = addr[3]; + mv.mac_addr4 = addr[4]; + mv.mac_addr5 = addr[5]; buf = &cmd.req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); err = qlcnic_issue_cmd(adapter, &cmd); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 61f81f6c84a9..648a73f904ee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -94,8 +94,23 @@ struct qlcnic_intrpt_config { }; struct qlcnic_macvlan_mbx { - u8 mac[ETH_ALEN]; +#if defined(__LITTLE_ENDIAN) + u8 mac_addr0; + u8 mac_addr1; + u8 mac_addr2; + u8 mac_addr3; + u8 mac_addr4; + u8 mac_addr5; u16 vlan; +#elif defined(__BIG_ENDIAN) + u8 mac_addr3; + u8 mac_addr2; + u8 mac_addr1; + u8 mac_addr0; + u16 vlan; + u8 mac_addr5; + u8 mac_addr4; +#endif }; struct qlc_83xx_fw_info { @@ -434,5 +449,6 @@ int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); int qlcnic_83xx_loopback_test(struct net_device *, u8); int qlcnic_83xx_interrupt_test(struct net_device *); +int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state); int qlcnic_83xx_flash_test(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 5c033f268ca5..ba5ac69bf48e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -31,6 +31,7 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); /* Template header */ struct qlc_83xx_reset_hdr { +#if defined(__LITTLE_ENDIAN) u16 version; u16 signature; u16 size; @@ -39,14 +40,31 @@ struct qlc_83xx_reset_hdr { u16 checksum; u16 init_offset; u16 start_offset; +#elif defined(__BIG_ENDIAN) + u16 signature; + u16 version; + u16 entries; + u16 size; + u16 checksum; + u16 hdr_size; + u16 start_offset; + u16 init_offset; +#endif } __packed; /* Command entry header. */ struct qlc_83xx_entry_hdr { - u16 cmd; - u16 size; - u16 count; - u16 delay; +#if defined(__LITTLE_ENDIAN) + u16 cmd; + u16 size; + u16 count; + u16 delay; +#elif defined(__BIG_ENDIAN) + u16 size; + u16 cmd; + u16 delay; + u16 count; +#endif } __packed; /* Generic poll command */ @@ -60,10 +78,17 @@ struct qlc_83xx_rmw { u32 mask; u32 xor_value; u32 or_value; +#if defined(__LITTLE_ENDIAN) u8 shl; u8 shr; u8 index_a; u8 rsvd; +#elif defined(__BIG_ENDIAN) + u8 rsvd; + u8 index_a; + u8 shr; + u8 shl; +#endif } __packed; /* Generic command with 2 DWORD */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 5641f8ec49ab..f4f279d5cba4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -149,7 +149,8 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { static inline int qlcnic_82xx_statistics(void) { - return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + return ARRAY_SIZE(qlcnic_device_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); } static inline int qlcnic_83xx_statistics(void) @@ -1070,8 +1071,7 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static void -qlcnic_fill_stats(u64 *data, void *stats, int type) +static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) { if (type == QLCNIC_MAC_STATS) { struct qlcnic_mac_statistics *mac_stats = @@ -1120,6 +1120,7 @@ qlcnic_fill_stats(u64 *data, void *stats, int type) *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames); *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes); } + return data; } static void qlcnic_get_ethtool_stats(struct net_device *dev, @@ -1147,7 +1148,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, /* Retrieve MAC statistics from firmware */ memset(&mac_stats, 0, sizeof(struct 
qlcnic_mac_statistics)); qlcnic_get_mac_stats(adapter, &mac_stats); - qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); + data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); } if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) @@ -1159,7 +1160,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, if (ret) return; - qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); + data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); if (ret) @@ -1176,7 +1177,8 @@ static int qlcnic_set_led(struct net_device *dev, int err = -EIO, active = 1; if (qlcnic_83xx_check(adapter)) - return -EOPNOTSUPP; + return qlcnic_83xx_set_led(dev, state); + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(dev, "LED test not supported for non " "privilege function\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 28a6d4838364..c6f9d5eb7d50 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -253,11 +253,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = -EOPNOTSUPP; - if (!adapter->fdb_mac_learn) { - pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", - __func__); - return err; - } + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_del(ndm, tb, netdev, addr); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { if (is_unicast_ether_addr(addr)) @@ -277,11 +274,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; - if (!adapter->fdb_mac_learn) { - pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", - __func__); - return -EOPNOTSUPP; - } + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { pr_info("%s: FDB e-switch is not enabled\n", __func__); @@ -306,11 +300,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb, { struct qlcnic_adapter *adapter = netdev_priv(netdev); - if (!adapter->fdb_mac_learn) { - pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", - __func__); - return -EOPNOTSUPP; - } + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_dump(skb, ncb, netdev, idx); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7eb..1dd778a6f01e 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1211,8 +1211,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE); if (sbq_desc->p.skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "Couldn't get an skb.\n"); rx_ring->sbq_clean_idx = clean_idx; return; } @@ -1519,8 +1517,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, skb = netdev_alloc_skb(ndev, length); if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, need to unwind!.\n"); rx_ring->rx_dropped++; put_page(lbq_desc->p.pg_chunk.page); return; @@ -1605,8 +1601,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, /* Allocate new_skb and copy */ new_skb = 
netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); if (new_skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "No skb available, drop the packet.\n"); rx_ring->rx_dropped++; return; } diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 5b4103db70f5..e9dc84943cfc 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -224,11 +224,14 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) break; } + if (limit < 0) + return -ETIMEDOUT; + return ioread16(ioaddr + MMRD); } /* Write a word data from PHY Chip */ -static void r6040_phy_write(void __iomem *ioaddr, +static int r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) { int limit = MAC_DEF_TIMEOUT; @@ -243,6 +246,8 @@ static void r6040_phy_write(void __iomem *ioaddr, if (!(cmd & MDIO_WRITE)) break; } + + return (limit < 0) ? -ETIMEDOUT : 0; } static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg) @@ -261,9 +266,7 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr, struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - r6040_phy_write(ioaddr, phy_addr, reg, value); - - return 0; + return r6040_phy_write(ioaddr, phy_addr, reg, value); } static int r6040_mdiobus_reset(struct mii_bus *bus) @@ -347,7 +350,6 @@ static int r6040_alloc_rxbufs(struct net_device *dev) do { skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); if (!skb) { - netdev_err(dev, "failed to alloc skb for rx\n"); rc = -ENOMEM; goto err_exit; } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 1276ac71353a..3ccedeb8aba0 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2041,8 +2041,6 @@ keep_pkt: netif_receive_skb (skb); } else { - if (net_ratelimit()) - netdev_warn(dev, "Memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; } received++; diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 9f2d416de750..d77d60ea8202 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -782,8 +782,6 @@ static void net_rx(struct net_device *dev) skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", - dev->name); dev->stats.rx_dropped++; goto done; } diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index 3aca57853ed4..bdac936a68bc 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -651,8 +651,11 @@ if (next_ptr < RX_START || next_ptr >= RX_END) { skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); received ++; - } else - goto dropping; + } else { + ether3_outw(next_ptr >> 8, REG_RECVEND); + dev->stats.rx_dropped++; + goto done; + } } else { struct net_device_stats *stats = &dev->stats; ether3_outw(next_ptr >> 8, REG_RECVEND); @@ -679,21 +682,6 @@ done: } return maxcnt; - -dropping:{ - static unsigned long last_warned; - - ether3_outw(next_ptr >> 8, REG_RECVEND); - /* - * Don't print this message too many times... 
- */ - if (time_after(jiffies, last_warned + 10 * HZ)) { - last_warned = jiffies; - printk("%s: memory squeeze, dropping packet.\n", dev->name); - } - dev->stats.rx_dropped++; - goto done; - } } /* diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 0fde9ca28269..0ad5694b41f8 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -381,8 +381,6 @@ memory_squeeze: dev->stats.rx_packets++; dev->stats.rx_bytes += len; } else { - printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; } } else { diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0bc00991d310..f050248e9fba 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -21,7 +21,9 @@ #include <linux/ethtool.h> #include <linux/topology.h> #include <linux/gfp.h> +#include <linux/pci.h> #include <linux/cpu_rmap.h> +#include <linux/aer.h> #include "net_driver.h" #include "efx.h" #include "nic.h" @@ -71,21 +73,21 @@ const char *const efx_loopback_mode_names[] = { const unsigned int efx_reset_type_max = RESET_TYPE_MAX; const char *const efx_reset_type_names[] = { - [RESET_TYPE_INVISIBLE] = "INVISIBLE", - [RESET_TYPE_ALL] = "ALL", - [RESET_TYPE_WORLD] = "WORLD", - [RESET_TYPE_DISABLE] = "DISABLE", - [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", - [RESET_TYPE_INT_ERROR] = "INT_ERROR", - [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", - [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", - [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", - [RESET_TYPE_TX_SKIP] = "TX_SKIP", - [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_INVISIBLE] = "INVISIBLE", + [RESET_TYPE_ALL] = "ALL", + [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", + [RESET_TYPE_WORLD] = "WORLD", + [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_DISABLE] = "DISABLE", + [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", + [RESET_TYPE_INT_ERROR] = "INT_ERROR", + [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", + [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", + [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", + [RESET_TYPE_TX_SKIP] = "TX_SKIP", + [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", }; -#define EFX_MAX_MTU (9 * 1024) - /* Reset workqueue. If any NIC has a hardware failure then a reset will be * queued onto this work queue. This is not a per-nic work queue, because * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. @@ -117,9 +119,12 @@ MODULE_PARM_DESC(separate_tx_channels, static int napi_weight = 64; /* This is the time (in jiffies) between invocations of the hardware - * monitor. On Falcon-based NICs, this will: + * monitor. + * On Falcon-based NICs, this will: * - Check the on-board hardware monitor; * - Poll the link state and reconfigure the hardware as necessary. + * On Siena-based NICs for power systems with EEH support, this will give EEH a + * chance to start. 
*/ static unsigned int efx_monitor_interval = 1 * HZ; @@ -203,13 +208,14 @@ static void efx_stop_all(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ (efx->state == STATE_DISABLED)) \ ASSERT_RTNL(); \ } while (0) static int efx_check_disabled(struct efx_nic *efx) { - if (efx->state == STATE_DISABLED) { + if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { netif_err(efx, drv, efx->net_dev, "device is disabled due to earlier errors\n"); return -EIO; @@ -242,15 +248,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget) struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - /* Deliver last RX packet. */ - if (channel->rx_pkt) { - __efx_rx_packet(channel, channel->rx_pkt); - channel->rx_pkt = NULL; - } - if (rx_queue->enabled) { - efx_rx_strategy(channel); + efx_rx_flush_packet(channel); + if (rx_queue->enabled) efx_fast_push_rx_descriptors(rx_queue); - } } return spent; @@ -625,20 +625,51 @@ fail: */ static void efx_start_datapath(struct efx_nic *efx) { + bool old_rx_scatter = efx->rx_scatter; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; struct efx_channel *channel; + size_t rx_buf_len; /* Calculate the rx buffer allocation parameters required to * support the current MTU, including padding for header * alignment and overruns. */ - efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + - EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + - efx->type->rx_buffer_hash_size + - efx->type->rx_buffer_padding); - efx->rx_buffer_order = get_order(efx->rx_buffer_len + - sizeof(struct efx_rx_page_state)); + efx->rx_dma_len = (efx->type->rx_buffer_hash_size + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + rx_buf_len = (sizeof(struct efx_rx_page_state) + + EFX_PAGE_IP_ALIGN + efx->rx_dma_len); + if (rx_buf_len <= PAGE_SIZE) { + efx->rx_scatter = false; + efx->rx_buffer_order = 0; + } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + + EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE > + PAGE_SIZE / 2); + efx->rx_scatter = true; + efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; + efx->rx_buffer_order = 0; + } else { + efx->rx_scatter = false; + efx->rx_buffer_order = get_order(rx_buf_len); + } + + efx_rx_config_page_split(efx); + if (efx->rx_buffer_order) + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u; page order=%u batch=%u\n", + efx->rx_dma_len, efx->rx_buffer_order, + efx->rx_pages_per_batch); + else + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u step=%u bpp=%u; page batch=%u\n", + efx->rx_dma_len, efx->rx_page_buf_step, + efx->rx_bufs_per_page, efx->rx_pages_per_batch); + + /* RX filters also have scatter-enabled flags */ + if (efx->rx_scatter != old_rx_scatter) + efx_filter_update_rx_scatter(efx); /* We must keep at least one descriptor in a TX ring empty. 
* We could avoid this when the queue size does not exactly @@ -655,16 +686,12 @@ static void efx_start_datapath(struct efx_nic *efx) efx_for_each_channel_tx_queue(tx_queue, channel) efx_init_tx_queue(tx_queue); - /* The rx buffer allocation strategy is MTU dependent */ - efx_rx_strategy(channel); - efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); efx_nic_generate_fill_event(rx_queue); } - WARN_ON(channel->rx_pkt != NULL); - efx_rx_strategy(channel); + WARN_ON(channel->rx_pkt_n_frags); } if (netif_device_present(efx->net_dev)) @@ -683,7 +710,7 @@ static void efx_stop_datapath(struct efx_nic *efx) BUG_ON(efx->port_enabled); /* Only perform flush if dma is enabled */ - if (dev->is_busmaster) { + if (dev->is_busmaster && efx->state != STATE_RECOVERY) { rc = efx_nic_flush_queues(efx); if (rc && EFX_WORKAROUND_7803(efx)) { @@ -1596,13 +1623,15 @@ static void efx_start_all(struct efx_nic *efx) efx_start_port(efx); efx_start_datapath(efx); - /* Start the hardware monitor if there is one. Otherwise (we're link - * event driven), we have to poll the PHY because after an event queue - * flush, we could have a missed a link state change */ - if (efx->type->monitor != NULL) { + /* Start the hardware monitor if there is one */ + if (efx->type->monitor != NULL) queue_delayed_work(efx->workqueue, &efx->monitor_work, efx_monitor_interval); - } else { + + /* If link state detection is normally event-driven, we have + * to poll now because we could have missed a change + */ + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { mutex_lock(&efx->mac_lock); if (efx->phy_op->poll(efx)) efx_link_status_changed(efx); @@ -2309,7 +2338,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) out: /* Leave device stopped if necessary */ - disabled = rc || method == RESET_TYPE_DISABLE; + disabled = rc || + method == RESET_TYPE_DISABLE || + method == RESET_TYPE_RECOVER_OR_DISABLE; rc2 = efx_reset_up(efx, method, !disabled); if (rc2) { disabled = true; @@ -2328,13 +2359,48 @@ out: return rc; } +/* Try recovery mechanisms. + * For now only EEH is supported. + * Returns 0 if the recovery mechanisms are unsuccessful. + * Returns a non-zero value otherwise. + */ +static int efx_try_recovery(struct efx_nic *efx) +{ +#ifdef CONFIG_EEH + /* A PCI error can occur and not be seen by EEH because nothing + * happens on the PCI bus. In this case the driver may fail and + * schedule a 'recover or reset', leading to this recovery handler. + * Manually call the eeh failure check function. + */ + struct eeh_dev *eehdev = + of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); + + if (eeh_dev_check_failure(eehdev)) { + /* The EEH mechanisms will handle the error and reset the + * device if necessary. + */ + return 1; + } +#endif + return 0; +} + /* The worker thread exists so that code that cannot sleep can * schedule a reset for later. */ static void efx_reset_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); - unsigned long pending = ACCESS_ONCE(efx->reset_pending); + unsigned long pending; + enum reset_type method; + + pending = ACCESS_ONCE(efx->reset_pending); + method = fls(pending) - 1; + + if ((method == RESET_TYPE_RECOVER_OR_DISABLE || + method == RESET_TYPE_RECOVER_OR_ALL) && + efx_try_recovery(efx)) + return; if (!pending) return; @@ -2346,7 +2412,7 @@ static void efx_reset_work(struct work_struct *data) * it cannot change again. 
*/ if (efx->state == STATE_READY) - (void)efx_reset(efx, fls(pending) - 1); + (void)efx_reset(efx, method); rtnl_unlock(); } @@ -2355,11 +2421,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) { enum reset_type method; + if (efx->state == STATE_RECOVERY) { + netif_dbg(efx, drv, efx->net_dev, + "recovering: skip scheduling %s reset\n", + RESET_TYPE(type)); + return; + } + switch (type) { case RESET_TYPE_INVISIBLE: case RESET_TYPE_ALL: + case RESET_TYPE_RECOVER_OR_ALL: case RESET_TYPE_WORLD: case RESET_TYPE_DISABLE: + case RESET_TYPE_RECOVER_OR_DISABLE: method = type; netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", RESET_TYPE(method)); @@ -2569,6 +2644,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_fini_struct(efx); pci_set_drvdata(pci_dev, NULL); free_netdev(efx->net_dev); + + pci_disable_pcie_error_reporting(pci_dev); }; /* NIC VPD information @@ -2741,6 +2818,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev, netif_warn(efx, probe, efx->net_dev, "failed to create MTDs (%d)\n", rc); + rc = pci_enable_pcie_error_reporting(pci_dev); + if (rc && rc != -EINVAL) + netif_warn(efx, probe, efx->net_dev, + "pci_enable_pcie_error_reporting failed (%d)\n", rc); + return 0; fail4: @@ -2865,12 +2947,112 @@ static const struct dev_pm_ops efx_pm_ops = { .restore = efx_pm_resume, }; +/* A PCI error affecting this device was detected. + * At this point MMIO and DMA may be disabled. + * Stop the software path and request a slot reset. + */ +pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + struct efx_nic *efx = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_RECOVERY; + efx->reset_pending = 0; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_stop_interrupts(efx, false); + + status = PCI_ERS_RESULT_NEED_RESET; + } else { + /* If the interface is disabled we don't want to do anything + * with it. + */ + status = PCI_ERS_RESULT_RECOVERED; + } + + rtnl_unlock(); + + pci_disable_device(pdev); + + return status; +} + +/* Fake a successfull reset, which will be performed later in efx_io_resume. */ +pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + int rc; + + if (pci_enable_device(pdev)) { + netif_err(efx, hw, efx->net_dev, + "Cannot re-enable PCI device after reset.\n"); + status = PCI_ERS_RESULT_DISCONNECT; + } + + rc = pci_cleanup_aer_uncorrect_error_status(pdev); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); + /* Non-fatal error. Continue. */ + } + + return status; +} + +/* Perform the actual reset and resume I/O operations. 
*/ +static void efx_io_resume(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + int rc; + + rtnl_lock(); + + if (efx->state == STATE_DISABLED) + goto out; + + rc = efx_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "efx_reset failed after PCI error (%d)\n", rc); + } else { + efx->state = STATE_READY; + netif_dbg(efx, hw, efx->net_dev, + "Done resetting and resuming IO after PCI error.\n"); + } + +out: + rtnl_unlock(); +} + +/* For simplicity and reliability, we always require a slot reset and try to + * reset the hardware when a pci error affecting the device is detected. + * We leave both the link_reset and mmio_enabled callback unimplemented: + * with our request for slot reset the mmio_enabled callback will never be + * called, and the link_reset callback is not used by AER or EEH mechanisms. + */ +static struct pci_error_handlers efx_err_handlers = { + .error_detected = efx_io_error_detected, + .slot_reset = efx_io_slot_reset, + .resume = efx_io_resume, +}; + static struct pci_driver efx_pci_driver = { .name = KBUILD_MODNAME, .id_table = efx_pci_table, .probe = efx_pci_probe, .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, + .err_handler = &efx_err_handlers, }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index d2f790df6dcb..8372da239b43 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ +extern void efx_rx_config_page_split(struct efx_nic *efx); extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); -extern void efx_rx_strategy(struct efx_channel *channel); extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); extern void efx_rx_slow_fill(unsigned long context); -extern void __efx_rx_packet(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf); -extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, +extern void __efx_rx_packet(struct efx_channel *channel); +extern void efx_rx_packet(struct efx_rx_queue *rx_queue, + unsigned int index, unsigned int n_frags, unsigned int len, u16 flags); +static inline void efx_rx_flush_packet(struct efx_channel *channel) +{ + if (channel->rx_pkt_n_frags) + __efx_rx_packet(channel); +} extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_DMAQ_SIZE 4096UL @@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); extern int efx_probe_filters(struct efx_nic *efx); extern void efx_restore_filters(struct efx_nic *efx); extern void efx_remove_filters(struct efx_nic *efx); +extern void efx_filter_update_rx_scatter(struct efx_nic *efx); extern s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace); diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index 182dbe2cc6e4..ab8fb5889e55 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -137,8 +137,12 @@ enum efx_loopback_mode { * Reset methods are numbered in order of increasing scope. 
* * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) + * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL + * if unsuccessful. * @RESET_TYPE_ALL: Reset datapath, MAC and PHY * @RESET_TYPE_WORLD: Reset as much as possible + * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if + * unsuccessful. * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog * @RESET_TYPE_INT_ERROR: reset due to internal error @@ -150,9 +154,11 @@ enum efx_loopback_mode { */ enum reset_type { RESET_TYPE_INVISIBLE = 0, - RESET_TYPE_ALL = 1, - RESET_TYPE_WORLD = 2, - RESET_TYPE_DISABLE = 3, + RESET_TYPE_RECOVER_OR_ALL = 1, + RESET_TYPE_ALL = 2, + RESET_TYPE_WORLD = 3, + RESET_TYPE_RECOVER_OR_DISABLE = 4, + RESET_TYPE_DISABLE = 5, RESET_TYPE_MAX_METHOD, RESET_TYPE_TX_WATCHDOG, RESET_TYPE_INT_ERROR, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 8e61cd06f66a..6e768175e7e0 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), }; /* Number of ethtool statistics */ @@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, rule->m_ext.data[1])) return -EINVAL; - efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, + efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 0xfff : rule->ring_cookie); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 49bcd196e10d..4486102fa9b3 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx) static void falcon_init_rx_cfg(struct efx_nic *efx) { - /* Prior to Siena the RX DMA engine will split each frame at - * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to - * be so large that that never happens. */ - const unsigned huge_buf_size = (3 * 4096) >> 5; /* RX control FIFO thresholds (32 entries) */ const unsigned ctrl_xon_thr = 20; const unsigned ctrl_xoff_thr = 25; @@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) efx_reado(efx, ®, FR_AZ_RX_CFG); if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { - /* Data FIFO size is 5.5K */ + /* Data FIFO size is 5.5K. The RX DMA engine only + * supports scattering for user-mode queues, but will + * split DMA writes at intervals of RX_USR_BUF_SIZE + * (32-byte units) even for kernel-mode queues. We + * set it to be so large that that never happens. 
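/* A standalone sketch of how efx_reset_work() (earlier in this patch) picks
 * which pending reset to run.  efx->reset_pending is a bitmask indexed by
 * enum reset_type, and fls(pending) - 1 selects the highest-numbered request;
 * the renumbered enum above keeps RECOVER_OR_ALL below ALL and
 * RECOVER_OR_DISABLE below DISABLE, so a recoverable request never masks a
 * more severe one.  pick_reset_method() and fls_ul() are hypothetical helpers
 * used only for this illustration.
 */
#define BIT(n)	(1UL << (n))

static int fls_ul(unsigned long x)	/* behaves like the kernel's fls() */
{
	return x ? 8 * (int)sizeof(x) - __builtin_clzl(x) : 0;
}

static int pick_reset_method(unsigned long pending)
{
	return fls_ul(pending) - 1;
}

/* Worked example with the new numbering (RECOVER_OR_ALL = 1, ALL = 2,
 * RECOVER_OR_DISABLE = 4, DISABLE = 5):
 *
 *   pick_reset_method(BIT(1))          == 1   try EEH recovery, else ALL
 *   pick_reset_method(BIT(1) | BIT(5)) == 5   a scheduled DISABLE still wins
 */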
+ */ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, - huge_buf_size); + (3 * 4096) >> 5); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); @@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) /* Data FIFO size is 80K; register fields moved */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, - huge_buf_size); + EFX_RX_USR_BUF_SIZE >> 5); /* Send XON and XOFF at ~3 * max MTU away from empty/full */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); @@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_padding = 0x24, + .can_rx_scatter = false, .max_interrupt_mode = EFX_INT_MODE_MSI, .phys_addr_channels = 4, .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, @@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, + .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy * interrupt handler only supports 32 diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 8af42cd1feda..2397f0e8d3eb 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -66,6 +66,10 @@ struct efx_filter_state { #endif }; +static void efx_filter_table_clear_entry(struct efx_nic *efx, + struct efx_filter_table *table, + unsigned int filter_idx); + /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit * key derived from the n-tuple. The initial LFSR state is 0xffff. */ static u16 efx_filter_hash(u32 key) @@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx) filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags & EFX_FILTER_FLAG_RX_RSS)); + + /* There is a single bit to enable RX scatter for all + * unmatched packets. Only set it if scatter is + * enabled in both filter specs. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags & + table->spec[EFX_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_SCATTER)); + } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + /* We don't expose 'default' filters because unmatched + * packets always go to the queue number found in the + * RSS table. But we still need to set the RX scatter + * bit here. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + efx->rx_scatter); } efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); @@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF]; struct efx_filter_spec *spec = &table->spec[filter_idx]; + enum efx_filter_flags flags = 0; + + /* If there's only one channel then disable RSS for non VF + * traffic, thereby allowing VFs to use RSS when the PF can't. 
+ */ + if (efx->n_rx_channels > 1) + flags |= EFX_FILTER_FLAG_RX_RSS; - efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, - EFX_FILTER_FLAG_RX_RSS, 0); + if (efx->rx_scatter) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + + efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0); spec->type = EFX_FILTER_UC_DEF + filter_idx; table->used_bitmap[0] |= 1 << filter_idx; } @@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) break; } - case EFX_FILTER_TABLE_RX_DEF: - /* One filter spec per type */ - BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); - BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != - EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); - return spec->type - EFX_FILTER_UC_DEF; - case EFX_FILTER_TABLE_RX_MAC: { bool is_wild = spec->type == EFX_FILTER_MAC_WILD; EFX_POPULATE_OWORD_7( @@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left, return true; } -static int efx_filter_search(struct efx_filter_table *table, - struct efx_filter_spec *spec, u32 key, - bool for_insert, unsigned int *depth_required) -{ - unsigned hash, incr, filter_idx, depth, depth_max; - - hash = efx_filter_hash(key); - incr = efx_filter_increment(key); - - filter_idx = hash & (table->size - 1); - depth = 1; - depth_max = (for_insert ? - (spec->priority <= EFX_FILTER_PRI_HINT ? - FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) : - table->search_depth[spec->type]); - - for (;;) { - /* Return success if entry is used and matches this spec - * or entry is unused and we are trying to insert. - */ - if (test_bit(filter_idx, table->used_bitmap) ? - efx_filter_equal(spec, &table->spec[filter_idx]) : - for_insert) { - *depth_required = depth; - return filter_idx; - } - - /* Return failure if we reached the maximum search depth */ - if (depth == depth_max) - return for_insert ? -EBUSY : -ENOENT; - - filter_idx = (filter_idx + incr) & (table->size - 1); - ++depth; - } -} - /* * Construct/deconstruct external filter IDs. At least the RX filter * IDs must be ordered by matching priority, for RX NFC semantics. @@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) * efx_filter_insert_filter - add or replace a filter * @efx: NIC in which to insert the filter * @spec: Specification for the filter - * @replace: Flag for whether the specified filter may replace a filter - * with an identical match expression and equal or lower priority + * @replace_equal: Flag for whether the specified filter may replace an + * existing filter with equal priority * * On success, return the filter ID. * On failure, return a negative error code. + * + * If an existing filter has equal match values to the new filter + * spec, then the new filter might replace it, depending on the + * relative priorities. If the existing filter has lower priority, or + * if @replace_equal is set and it has equal priority, then it is + * replaced. Otherwise the function fails, returning -%EPERM if + * the existing filter has higher priority or -%EEXIST if it has + * equal priority. 
*/ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, - bool replace) + bool replace_equal) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = efx_filter_spec_table(state, spec); - struct efx_filter_spec *saved_spec; efx_oword_t filter; - unsigned int filter_idx, depth = 0; - u32 key; + int rep_index, ins_index; + unsigned int depth = 0; int rc; if (!table || table->size == 0) return -EINVAL; - key = efx_filter_build(&filter, spec); - netif_vdbg(efx, hw, efx->net_dev, "%s: type %d search_depth=%d", __func__, spec->type, table->search_depth[spec->type]); - spin_lock_bh(&state->lock); + if (table->id == EFX_FILTER_TABLE_RX_DEF) { + /* One filter spec per type */ + BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); + BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != + EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); + rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; + ins_index = rep_index; - rc = efx_filter_search(table, spec, key, true, &depth); - if (rc < 0) - goto out; - filter_idx = rc; - BUG_ON(filter_idx >= table->size); - saved_spec = &table->spec[filter_idx]; - - if (test_bit(filter_idx, table->used_bitmap)) { - /* Should we replace the existing filter? */ - if (!replace) { + spin_lock_bh(&state->lock); + } else { + /* Search concurrently for + * (1) a filter to be replaced (rep_index): any filter + * with the same match values, up to the current + * search depth for this type, and + * (2) the insertion point (ins_index): (1) or any + * free slot before it or up to the maximum search + * depth for this priority + * We fail if we cannot find (2). + * + * We can stop once either + * (a) we find (1), in which case we have definitely + * found (2) as well; or + * (b) we have searched exhaustively for (1), and have + * either found (2) or searched exhaustively for it + */ + u32 key = efx_filter_build(&filter, spec); + unsigned int hash = efx_filter_hash(key); + unsigned int incr = efx_filter_increment(key); + unsigned int max_rep_depth = table->search_depth[spec->type]; + unsigned int max_ins_depth = + spec->priority <= EFX_FILTER_PRI_HINT ? 
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX; + unsigned int i = hash & (table->size - 1); + + ins_index = -1; + depth = 1; + + spin_lock_bh(&state->lock); + + for (;;) { + if (!test_bit(i, table->used_bitmap)) { + if (ins_index < 0) + ins_index = i; + } else if (efx_filter_equal(spec, &table->spec[i])) { + /* Case (a) */ + if (ins_index < 0) + ins_index = i; + rep_index = i; + break; + } + + if (depth >= max_rep_depth && + (ins_index >= 0 || depth >= max_ins_depth)) { + /* Case (b) */ + if (ins_index < 0) { + rc = -EBUSY; + goto out; + } + rep_index = -1; + break; + } + + i = (i + incr) & (table->size - 1); + ++depth; + } + } + + /* If we found a filter to be replaced, check whether we + * should do so + */ + if (rep_index >= 0) { + struct efx_filter_spec *saved_spec = &table->spec[rep_index]; + + if (spec->priority == saved_spec->priority && !replace_equal) { rc = -EEXIST; goto out; } @@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, rc = -EPERM; goto out; } - } else { - __set_bit(filter_idx, table->used_bitmap); + } + + /* Insert the filter */ + if (ins_index != rep_index) { + __set_bit(ins_index, table->used_bitmap); ++table->used; } - *saved_spec = *spec; + table->spec[ins_index] = *spec; if (table->id == EFX_FILTER_TABLE_RX_DEF) { efx_filter_push_rx_config(efx); @@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, } efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); + table->offset + table->step * ins_index); + + /* If we were able to replace a filter by inserting + * at a lower depth, clear the replaced filter + */ + if (ins_index != rep_index && rep_index >= 0) + efx_filter_table_clear_entry(efx, table, rep_index); } netif_vdbg(efx, hw, efx->net_dev, "%s: filter type %d index %d rxq %u set", - __func__, spec->type, filter_idx, spec->dmaq_id); - rc = efx_filter_make_id(spec, filter_idx); + __func__, spec->type, ins_index, spec->dmaq_id); + rc = efx_filter_make_id(spec, ins_index); out: spin_unlock_bh(&state->lock); @@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx) kfree(state); } +/* Update scatter enable flags for filters pointing to our own RX queues */ +void efx_filter_update_rx_scatter(struct efx_nic *efx) +{ + struct efx_filter_state *state = efx->filter_state; + enum efx_filter_table_id table_id; + struct efx_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + spin_lock_bh(&state->lock); + + for (table_id = EFX_FILTER_TABLE_RX_IP; + table_id <= EFX_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap) || + table->spec[filter_idx].dmaq_id >= + efx->n_rx_channels) + continue; + + if (efx->rx_scatter) + table->spec[filter_idx].flags |= + EFX_FILTER_FLAG_RX_SCATTER; + else + table->spec[filter_idx].flags &= + ~EFX_FILTER_FLAG_RX_SCATTER; + + if (table_id == EFX_FILTER_TABLE_RX_DEF) + /* Pushed by efx_filter_push_rx_config() */ + continue; + + efx_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_filter_push_rx_config(efx); + + spin_unlock_bh(&state->lock); +} + #ifdef CONFIG_RFS_ACCEL int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 9d426d0457bd..c5c9747861ba 100644 --- 
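/* A compact model of the insertion search described in the hunk above.  The
 * filter table is open-addressed: probing starts at hash & (size - 1) and
 * steps by a key-derived increment, looking simultaneously for an equal entry
 * to replace and for the first free slot to insert into.  used(), equal(),
 * probe_insert() and the depth limits are stand-ins for the driver's bitmap
 * and spec helpers, shown only to make the two-exit loop explicit.
 */
static int probe_insert(unsigned int hash, unsigned int incr,
			unsigned int size, unsigned int max_rep_depth,
			unsigned int max_ins_depth,
			int (*used)(unsigned int), int (*equal)(unsigned int),
			int *rep_index)
{
	unsigned int i = hash & (size - 1);
	unsigned int depth = 1;
	int ins_index = -1;

	for (;;) {
		if (!used(i)) {
			if (ins_index < 0)
				ins_index = i;	/* first free slot */
		} else if (equal(i)) {
			if (ins_index < 0)
				ins_index = i;	/* replace in place */
			*rep_index = i;		/* case (a): match found */
			return ins_index;
		}

		if (depth >= max_rep_depth &&
		    (ins_index >= 0 || depth >= max_ins_depth)) {
			*rep_index = -1;	/* case (b): nothing to replace */
			return ins_index;	/* -1 means the table is busy */
		}

		i = (i + incr) & (size - 1);
		++depth;
	}
}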
a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -553,6 +553,7 @@ #define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */ #define MC_CMD_PTP_MODE_V2 0x2 /* enum */ #define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */ /* MC_CMD_PTP_IN_DISABLE msgrequest */ #define MC_CMD_PTP_IN_DISABLE_LEN 8 diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 0a90abd2421b..9bd433a095c5 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -69,6 +69,12 @@ #define EFX_TXQ_TYPES 4 #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) +/* Maximum possible MTU the driver supports */ +#define EFX_MAX_MTU (9 * 1024) + +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */ +#define EFX_RX_USR_BUF_SIZE 1824 + /* Forward declare Precision Time Protocol (PTP) support structure. */ struct efx_ptp_data; @@ -206,25 +212,23 @@ struct efx_tx_queue { /** * struct efx_rx_buffer - An Efx RX data buffer * @dma_addr: DMA base address of the buffer - * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE). - * Will be %NULL if the buffer slot is currently free. - * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE. + * @page: The associated page buffer. * Will be %NULL if the buffer slot is currently free. - * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE. - * @len: Buffer length, in bytes. - * @flags: Flags for buffer and packet state. + * @page_offset: If pending: offset in @page of DMA base address. + * If completed: offset in @page of Ethernet header. + * @len: If pending: length for DMA descriptor. + * If completed: received length, excluding hash prefix. + * @flags: Flags for buffer and packet state. These are only set on the + * first buffer of a scattered packet. */ struct efx_rx_buffer { dma_addr_t dma_addr; - union { - struct sk_buff *skb; - struct page *page; - } u; + struct page *page; u16 page_offset; u16 len; u16 flags; }; -#define EFX_RX_BUF_PAGE 0x0001 +#define EFX_RX_BUF_LAST_IN_PAGE 0x0001 #define EFX_RX_PKT_CSUMMED 0x0002 #define EFX_RX_PKT_DISCARD 0x0004 @@ -260,14 +264,23 @@ struct efx_rx_page_state { * @added_count: Number of buffers added to the receive queue. * @notified_count: Number of buffers given to NIC (<= @added_count). * @removed_count: Number of buffers removed from the receive queue. + * @scatter_n: Number of buffers used by current packet + * @page_ring: The ring to store DMA mapped pages for reuse. + * @page_add: Counter to calculate the write pointer for the recycle ring. + * @page_remove: Counter to calculate the read pointer for the recycle ring. + * @page_recycle_count: The number of pages that have been recycled. + * @page_recycle_failed: The number of pages that couldn't be recycled because + * the kernel still held a reference to them. + * @page_recycle_full: The number of pages that were released because the + * recycle ring was full. + * @page_ptr_mask: The number of pages in the RX recycle ring minus 1. * @max_fill: RX descriptor maximum fill level (<= ring size) * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill * (<= @max_fill) * @min_fill: RX descriptor minimum non-zero fill level. * This records the minimum fill level observed when a ring * refill was triggered. - * @alloc_page_count: RX allocation strategy counter. - * @alloc_skb_count: RX allocation strategy counter. 
+ * @recycle_count: RX buffer recycle counter. * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). */ struct efx_rx_queue { @@ -279,15 +292,22 @@ struct efx_rx_queue { bool enabled; bool flush_pending; - int added_count; - int notified_count; - int removed_count; + unsigned int added_count; + unsigned int notified_count; + unsigned int removed_count; + unsigned int scatter_n; + struct page **page_ring; + unsigned int page_add; + unsigned int page_remove; + unsigned int page_recycle_count; + unsigned int page_recycle_failed; + unsigned int page_recycle_full; + unsigned int page_ptr_mask; unsigned int max_fill; unsigned int fast_fill_trigger; unsigned int min_fill; unsigned int min_overfill; - unsigned int alloc_page_count; - unsigned int alloc_skb_count; + unsigned int recycle_count; struct timer_list slow_fill; unsigned int slow_fill_count; }; @@ -336,10 +356,6 @@ enum efx_rx_alloc_method { * @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @irq_count: Number of IRQs since last adaptive moderation decision * @irq_mod_score: IRQ moderation score - * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors - * and diagnostic counters - * @rx_alloc_push_pages: RX allocation method currently in use for pushing - * descriptors * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors @@ -347,6 +363,12 @@ enum efx_rx_alloc_method { * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors * @n_rx_overlength: Count of RX_OVERLENGTH errors * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun + * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to + * lack of descriptors + * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by + * __efx_rx_packet(), or zero if there is none + * @rx_pkt_index: Ring index of first buffer for next packet to be delivered + * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel */ @@ -371,9 +393,6 @@ struct efx_channel { unsigned int rfs_filters_added; #endif - int rx_alloc_level; - int rx_alloc_push_pages; - unsigned n_rx_tobe_disc; unsigned n_rx_ip_hdr_chksum_err; unsigned n_rx_tcp_udp_chksum_err; @@ -381,11 +400,10 @@ struct efx_channel { unsigned n_rx_frm_trunc; unsigned n_rx_overlength; unsigned n_skbuff_leaks; + unsigned int n_rx_nodesc_trunc; - /* Used to pipeline received packets in order to optimise memory - * access with prefetches. 
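/* The page_ring/page_add/page_remove/page_ptr_mask fields documented above
 * form a simple power-of-two recycle ring: page_add counts pages pushed back
 * for reuse, page_remove counts pages taken out, and page_ptr_mask is the
 * ring size minus one.  A schematic of the index arithmetic only; this is not
 * the driver's actual recycle code and the full/empty handling here is an
 * assumption.
 */
struct recycle_ring {
	void **slots;			/* plays the role of page_ring */
	unsigned int add;		/* page_add: producer counter */
	unsigned int remove;		/* page_remove: consumer counter */
	unsigned int mask;		/* page_ptr_mask: size - 1 */
};

static int ring_push(struct recycle_ring *r, void *page)
{
	unsigned int index = r->add & r->mask;

	if (r->slots[index])		/* slot still occupied: ring full */
		return -1;		/* would count as page_recycle_full */
	r->slots[index] = page;
	r->add++;
	return 0;
}

static void *ring_pop(struct recycle_ring *r)
{
	unsigned int index = r->remove & r->mask;
	void *page = r->slots[index];

	if (page) {
		r->slots[index] = NULL;
		r->remove++;		/* never exceeds add */
	}
	return page;			/* NULL when nothing to reuse */
}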
- */ - struct efx_rx_buffer *rx_pkt; + unsigned int rx_pkt_n_frags; + unsigned int rx_pkt_index; struct efx_rx_queue rx_queue; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; @@ -410,7 +428,7 @@ struct efx_channel_type { void (*post_remove)(struct efx_channel *); void (*get_name)(struct efx_channel *, char *buf, size_t len); struct efx_channel *(*copy)(const struct efx_channel *); - void (*receive_skb)(struct efx_channel *, struct sk_buff *); + bool (*receive_skb)(struct efx_channel *, struct sk_buff *); bool keep_eventq; }; @@ -446,6 +464,7 @@ enum nic_state { STATE_UNINIT = 0, /* device being probed/removed or is frozen */ STATE_READY = 1, /* hardware ready and netdev registered */ STATE_DISABLED = 2, /* device disabled due to hardware errors */ + STATE_RECOVERY = 3, /* device recovering from PCI error */ }; /* @@ -684,10 +703,13 @@ struct vfdi_status; * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX - * @rx_buffer_len: RX buffer length + * @rx_dma_len: Current maximum RX DMA length * @rx_buffer_order: Order (log2) of number of pages for each RX buffer + * @rx_buffer_truesize: Amortised allocation size of an RX buffer, + * for use in sk_buff::truesize * @rx_hash_key: Toeplitz hash key for RSS * @rx_indir_table: Indirection table for RSS + * @rx_scatter: Scatter mode enabled for receives * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired * @irq_status: Interrupt status buffer @@ -800,10 +822,15 @@ struct efx_nic { unsigned rss_spread; unsigned tx_channel_offset; unsigned n_tx_channels; - unsigned int rx_buffer_len; + unsigned int rx_dma_len; unsigned int rx_buffer_order; + unsigned int rx_buffer_truesize; + unsigned int rx_page_buf_step; + unsigned int rx_bufs_per_page; + unsigned int rx_pages_per_batch; u8 rx_hash_key[40]; u32 rx_indir_table[128]; + bool rx_scatter; unsigned int_error_count; unsigned long int_error_expire; @@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @evq_ptr_tbl_base: Event queue pointer table base address * @evq_rptr_tbl_base: Event queue read-pointer table base address * @max_dma_mask: Maximum possible DMA mask - * @rx_buffer_hash_size: Size of hash at start of RX buffer - * @rx_buffer_padding: Size of padding at end of RX buffer + * @rx_buffer_hash_size: Size of hash at start of RX packet + * @rx_buffer_padding: Size of padding at end of RX packet + * @can_rx_scatter: NIC is able to scatter packet to multiple buffers * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. 
* @phys_addr_channels: Number of channels with physically addressed @@ -983,6 +1011,7 @@ struct efx_nic_type { u64 max_dma_mask; unsigned int rx_buffer_hash_size; unsigned int rx_buffer_padding; + bool can_rx_scatter; unsigned int max_interrupt_mode; unsigned int phys_addr_channels; unsigned int timer_period_max; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 0ad790cc473c..f9f5df8b51fe 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -591,12 +591,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; bool iscsi_digest_en = is_b0; + bool jumbo_en; + + /* For kernel-mode queues in Falcon A1, the JUMBO flag enables + * DMA to continue after a PCIe page boundary (and scattering + * is not possible). In Falcon B0 and Siena, it enables + * scatter. + */ + jumbo_en = !is_b0 || efx->rx_scatter; netif_dbg(efx, hw, efx->net_dev, "RX queue %d ring in special buffers %d-%d\n", efx_rx_queue_index(rx_queue), rx_queue->rxd.index, rx_queue->rxd.index + rx_queue->rxd.entries - 1); + rx_queue->scatter_n = 0; + /* Pin RX descriptor ring */ efx_init_special_buffer(efx, &rx_queue->rxd); @@ -613,8 +623,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) FRF_AZ_RX_DESCQ_SIZE, __ffs(rx_queue->rxd.entries), FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , - /* For >=B0 this is scatter so disable */ - FRF_AZ_RX_DESCQ_JUMBO, !is_b0, + FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, FRF_AZ_RX_DESCQ_EN, 1); efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, efx_rx_queue_index(rx_queue)); @@ -968,13 +977,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, EFX_RX_PKT_DISCARD : 0; } -/* Handle receive events that are not in-order. */ -static void +/* Handle receive events that are not in-order. Return true if this + * can be handled as a partial packet discard, false if it's more + * serious. + */ +static bool efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) { + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); struct efx_nic *efx = rx_queue->efx; unsigned expected, dropped; + if (rx_queue->scatter_n && + index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & + rx_queue->ptr_mask)) { + ++channel->n_rx_nodesc_trunc; + return true; + } + expected = rx_queue->removed_count & rx_queue->ptr_mask; dropped = (index - expected) & rx_queue->ptr_mask; netif_info(efx, rx_err, efx->net_dev, @@ -983,6 +1003,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); + return false; } /* Handle a packet received event @@ -998,7 +1019,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; unsigned expected_ptr; - bool rx_ev_pkt_ok; + bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; u16 flags; struct efx_rx_queue *rx_queue; struct efx_nic *efx = channel->efx; @@ -1006,21 +1027,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) if (unlikely(ACCESS_ONCE(efx->reset_pending))) return; - /* Basic packet information */ - rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); - rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); - rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); - WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); - WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); + rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); + rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != channel->channel); rx_queue = efx_channel_get_rx_queue(channel); rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); - expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; - if (unlikely(rx_ev_desc_ptr != expected_ptr)) - efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); + expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & + rx_queue->ptr_mask); + + /* Check for partial drops and other errors */ + if (unlikely(rx_ev_desc_ptr != expected_ptr) || + unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { + if (rx_ev_desc_ptr != expected_ptr && + !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) + return; + + /* Discard all pending fragments */ + if (rx_queue->scatter_n) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + } + + /* Return if there is no new fragment */ + if (rx_ev_desc_ptr != expected_ptr) + return; + + /* Discard new fragment if not SOP */ + if (!rx_ev_sop) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + 1, 0, EFX_RX_PKT_DISCARD); + ++rx_queue->removed_count; + return; + } + } + + ++rx_queue->scatter_n; + if (rx_ev_cont) + return; + + rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); + rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); if (likely(rx_ev_pkt_ok)) { /* If packet is marked as OK and packet type is TCP/IP or @@ -1048,7 +1104,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) channel->irq_mod_score += 2; /* Handle received packet */ - efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_ev_byte_cnt, flags); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; } /* If this flush done event corresponds to a &struct efx_tx_queue, then diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 3f93624fc273..07f6baa15c0c 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -99,6 +99,9 @@ #define PTP_V2_VERSION_LENGTH 1 #define PTP_V2_VERSION_OFFSET 29 +#define PTP_V2_UUID_LENGTH 8 +#define PTP_V2_UUID_OFFSET 48 + /* Although PTP V2 
UUIDs are comprised a ClockIdentity (8) and PortNumber (2), * the MC only captures the last six bytes of the clock identity. These values * reflect those, not the ones used in the standard. The standard permits @@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, unsigned number_readings = (response_length / MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); unsigned i; - unsigned min; - unsigned min_set = 0; unsigned total; unsigned ngood = 0; unsigned last_good = 0; struct efx_ptp_data *ptp = efx->ptp_data; - bool min_valid = false; u32 last_sec; u32 start_sec; struct timespec delta; @@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, if (number_readings == 0) return -EAGAIN; - /* Find minimum value in this set of results, discarding clearly - * erroneous results. + /* Read the set of results and increment stats for any results that + * appera to be erroneous. */ for (i = 0; i < number_readings; i++) { efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; - if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) { - if (min_valid) { - if (ptp->timeset[i].window < min_set) - min_set = ptp->timeset[i].window; - } else { - min_valid = true; - min_set = ptp->timeset[i].window; - } - } - } - - if (min_valid) { - if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns)) - min = ptp->base_sync_ns; - else - min = min_set; - } else { - min = SYNCHRONISATION_GRANULARITY_NS; } - /* Discard excessively long synchronise durations. The MC times - * when it finishes reading the host time so the corrected window - * time should be fairly constant for a given platform. + /* Find the last good host-MC synchronization result. The MC times + * when it finishes reading the host time so the corrected window time + * should be fairly constant for a given platform. */ total = 0; for (i = 0; i < number_readings; i++) @@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, if (ngood == 0) { netif_warn(efx, drv, efx->net_dev, - "PTP no suitable synchronisations %dns %dns\n", - ptp->base_sync_ns, min_set); + "PTP no suitable synchronisations %dns\n", + ptp->base_sync_ns); return -EAGAIN; } @@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) * the receive timestamp from the MC - this will probably occur after the * packet arrival because of the processing in the MC. */ -static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) +static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) { struct efx_nic *efx = channel->efx; struct efx_ptp_data *ptp = efx->ptp_data; struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; - u8 *data; + u8 *match_data_012, *match_data_345; unsigned int version; match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); /* Correct version? 
*/ if (ptp->mode == MC_CMD_PTP_MODE_V1) { - if (skb->len < PTP_V1_MIN_LENGTH) { - netif_receive_skb(skb); - return; + if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { + return false; } version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); if (version != PTP_VERSION_V1) { - netif_receive_skb(skb); - return; + return false; } + + /* PTP V1 uses all six bytes of the UUID to match the packet + * to the timestamp + */ + match_data_012 = skb->data + PTP_V1_UUID_OFFSET; + match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3; } else { - if (skb->len < PTP_V2_MIN_LENGTH) { - netif_receive_skb(skb); - return; + if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { + return false; } version = skb->data[PTP_V2_VERSION_OFFSET]; - - BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2); - BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET); - BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH); - BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); - BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); - if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { - netif_receive_skb(skb); - return; + return false; + } + + /* The original V2 implementation uses bytes 2-7 of + * the UUID to match the packet to the timestamp. This + * discards two of the bytes of the MAC address used + * to create the UUID (SF bug 33070). The PTP V2 + * enhanced mode fixes this issue and uses bytes 0-2 + * and byte 5-7 of the UUID. + */ + match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5; + if (ptp->mode == MC_CMD_PTP_MODE_V2) { + match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2; + } else { + match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0; + BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); } } @@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) timestamps = skb_hwtstamps(skb); memset(timestamps, 0, sizeof(*timestamps)); + /* We expect the sequence number to be in the same position in + * the packet for PTP V1 and V2 + */ + BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); + BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); + /* Extract UUID/Sequence information */ - data = skb->data + PTP_V1_UUID_OFFSET; - match->words[0] = (data[0] | - (data[1] << 8) | - (data[2] << 16) | - (data[3] << 24)); - match->words[1] = (data[4] | - (data[5] << 8) | + match->words[0] = (match_data_012[0] | + (match_data_012[1] << 8) | + (match_data_012[2] << 16) | + (match_data_345[0] << 24)); + match->words[1] = (match_data_345[1] | + (match_data_345[2] << 8) | (skb->data[PTP_V1_SEQUENCE_OFFSET + PTP_V1_SEQUENCE_LENGTH - 1] << 16)); @@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) skb_queue_tail(&ptp->rxq, skb); queue_work(ptp->workwq, &ptp->work); + + return true; } /* Transmit a PTP packet. This has to be transmitted by the MC @@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) * timestamped */ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; - new_mode = MC_CMD_PTP_MODE_V2; + new_mode = MC_CMD_PTP_MODE_V2_ENHANCED; enable_wanted = true; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: @@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) if (init->tx_type != HWTSTAMP_TX_OFF) enable_wanted = true; + /* Old versions of the firmware do not support the improved + * UUID filtering option (SF bug 33070). If the firmware does + * not accept the enhanced mode, fall back to the standard PTP + * v2 UUID filtering. 
+ */ rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); + if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED)) + rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index bb579a6128c8..a948b36c1910 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -16,6 +16,7 @@ #include <linux/udp.h> #include <linux/prefetch.h> #include <linux/moduleparam.h> +#include <linux/iommu.h> #include <net/ip.h> #include <net/checksum.h> #include "net_driver.h" @@ -24,85 +25,39 @@ #include "selftest.h" #include "workarounds.h" -/* Number of RX descriptors pushed at once. */ -#define EFX_RX_BATCH 8 +/* Preferred number of descriptors to fill at once */ +#define EFX_RX_PREFERRED_BATCH 8U -/* Maximum size of a buffer sharing a page */ -#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state)) +/* Number of RX buffers to recycle pages for. When creating the RX page recycle + * ring, this number is divided by the number of buffers per page to calculate + * the number of pages to store in the RX page recycle ring. + */ +#define EFX_RECYCLE_RING_SIZE_IOMMU 4096 +#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH) /* Size of buffer allocated for skb header area. */ #define EFX_SKB_HEADERS 64u -/* - * rx_alloc_method - RX buffer allocation method - * - * This driver supports two methods for allocating and using RX buffers: - * each RX buffer may be backed by an skb or by an order-n page. - * - * When GRO is in use then the second method has a lower overhead, - * since we don't have to allocate then free skbs on reassembled frames. - * - * Values: - * - RX_ALLOC_METHOD_AUTO = 0 - * - RX_ALLOC_METHOD_SKB = 1 - * - RX_ALLOC_METHOD_PAGE = 2 - * - * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count - * controlled by the parameters below. - * - * - Since pushing and popping descriptors are separated by the rx_queue - * size, so the watermarks should be ~rxd_size. - * - The performance win by using page-based allocation for GRO is less - * than the performance hit of using page-based allocation of non-GRO, - * so the watermarks should reflect this. - * - * Per channel we maintain a single variable, updated by each channel: - * - * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO : - * RX_ALLOC_FACTOR_SKB) - * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which - * limits the hysteresis), and update the allocation strategy: - * - * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ? - * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) - */ -static int rx_alloc_method = RX_ALLOC_METHOD_AUTO; - -#define RX_ALLOC_LEVEL_GRO 0x2000 -#define RX_ALLOC_LEVEL_MAX 0x3000 -#define RX_ALLOC_FACTOR_GRO 1 -#define RX_ALLOC_FACTOR_SKB (-2) - /* This is the percentage fill level below which new RX descriptors * will be added to the RX descriptor ring. */ static unsigned int rx_refill_threshold; +/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */ +#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ + EFX_RX_USR_BUF_SIZE) + /* * RX maximum head room required. * - * This must be at least 1 to prevent overflow and at least 2 to allow - * pipelined receives. + * This must be at least 1 to prevent overflow, plus one packet-worth + * to allow pipelined receives. 
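As a worked example of the new head-room sizing, using round illustrative numbers since EFX_MAX_FRAME_LEN(), EFX_MAX_MTU and EFX_RX_USR_BUF_SIZE are defined in net_driver.h rather than in this hunk: a roughly 9 KB maximum frame split into roughly 1.8 KB receive buffers gives

        EFX_RX_MAX_FRAGS  = DIV_ROUND_UP(9234, 1824) = 6
        EFX_RXD_HEAD_ROOM = 1 + EFX_RX_MAX_FRAGS     = 7

so the ring reserves one descriptor against overflow plus room for one whole scattered packet, instead of the old fixed value of 2.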
*/ -#define EFX_RXD_HEAD_ROOM 2 +#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS) -/* Offset of ethernet header within page */ -static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, - struct efx_rx_buffer *buf) +static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf) { - return buf->page_offset + efx->type->rx_buffer_hash_size; -} -static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) -{ - return PAGE_SIZE << efx->rx_buffer_order; -} - -static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) -{ - if (buf->flags & EFX_RX_BUF_PAGE) - return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); - else - return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size; + return page_address(buf->page) + buf->page_offset; } static inline u32 efx_rx_buf_hash(const u8 *eh) @@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh) #endif } -/** - * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers - * - * @rx_queue: Efx RX queue - * - * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a - * struct efx_rx_buffer for each one. Return a negative error code or 0 - * on success. May fail having only inserted fewer than EFX_RX_BATCH - * buffers. - */ -static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) +static inline struct efx_rx_buffer * +efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) +{ + if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask))) + return efx_rx_buffer(rx_queue, 0); + else + return rx_buf + 1; +} + +static inline void efx_sync_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf, + unsigned int len) +{ + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, + DMA_FROM_DEVICE); +} + +void efx_rx_config_page_split(struct efx_nic *efx) +{ + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN, + L1_CACHE_BYTES); + efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : + ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / + efx->rx_page_buf_step); + efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / + efx->rx_bufs_per_page; + efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, + efx->rx_bufs_per_page); +} + +/* Check the RX page recycle ring for a page that can be reused. */ +static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; - struct net_device *net_dev = efx->net_dev; - struct efx_rx_buffer *rx_buf; - struct sk_buff *skb; - int skb_len = efx->rx_buffer_len; - unsigned index, count; + struct page *page; + struct efx_rx_page_state *state; + unsigned index; - for (count = 0; count < EFX_RX_BATCH; ++count) { - index = rx_queue->added_count & rx_queue->ptr_mask; - rx_buf = efx_rx_buffer(rx_queue, index); - - rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len); - if (unlikely(!skb)) - return -ENOMEM; - - /* Adjust the SKB for padding */ - skb_reserve(skb, NET_IP_ALIGN); - rx_buf->len = skb_len - NET_IP_ALIGN; - rx_buf->flags = 0; - - rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev, - skb->data, rx_buf->len, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&efx->pci_dev->dev, - rx_buf->dma_addr))) { - dev_kfree_skb_any(skb); - rx_buf->u.skb = NULL; - return -EIO; - } + index = rx_queue->page_remove & rx_queue->page_ptr_mask; + page = rx_queue->page_ring[index]; + if (page == NULL) + return NULL; + + rx_queue->page_ring[index] = NULL; + /* page_remove cannot exceed page_add. 
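To put concrete numbers on efx_rx_config_page_split(), take a hypothetical order-0 configuration with 4 KiB pages, 64-byte L1_CACHE_BYTES, EFX_PAGE_IP_ALIGN of 2 and an rx_dma_len of 1728; none of these values are taken from this patch:

        rx_page_buf_step   = ALIGN(1728 + 2, 64)                              = 1792
        rx_bufs_per_page   = (4096 - sizeof(struct efx_rx_page_state)) / 1792 = 2
        rx_buffer_truesize = 4096 / 2                                         = 2048
        rx_pages_per_batch = DIV_ROUND_UP(8, 2)                               = 4

so each page is carved into two DMA buffers and a preferred refill batch touches four pages.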
*/ + if (rx_queue->page_remove != rx_queue->page_add) + ++rx_queue->page_remove; - ++rx_queue->added_count; - ++rx_queue->alloc_skb_count; + /* If page_count is 1 then we hold the only reference to this page. */ + if (page_count(page) == 1) { + ++rx_queue->page_recycle_count; + return page; + } else { + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + ++rx_queue->page_recycle_failed; } - return 0; + return NULL; } /** - * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers + * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers * * @rx_queue: Efx RX queue * - * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA, - * and populates struct efx_rx_buffers for each one. Return a negative error - * code or 0 on success. If a single page can be split between two buffers, - * then the page will either be inserted fully, or not at at all. + * This allocates a batch of pages, maps them for DMA, and populates + * struct efx_rx_buffers for each one. Return a negative error code or + * 0 on success. If a single page can be used for multiple buffers, + * then the page will either be inserted fully, or not at all. */ -static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) +static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; struct efx_rx_buffer *rx_buf; @@ -188,150 +158,140 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) dma_addr_t dma_addr; unsigned index, count; - /* We can split a page between two buffers */ - BUILD_BUG_ON(EFX_RX_BATCH & 1); - - for (count = 0; count < EFX_RX_BATCH; ++count) { - page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, - efx->rx_buffer_order); - if (unlikely(page == NULL)) - return -ENOMEM; - dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0, - efx_rx_buf_size(efx), - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) { - __free_pages(page, efx->rx_buffer_order); - return -EIO; + count = 0; + do { + page = efx_reuse_page(rx_queue); + if (page == NULL) { + page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, + efx->rx_buffer_order); + if (unlikely(page == NULL)) + return -ENOMEM; + dma_addr = + dma_map_page(&efx->pci_dev->dev, page, 0, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&efx->pci_dev->dev, + dma_addr))) { + __free_pages(page, efx->rx_buffer_order); + return -EIO; + } + state = page_address(page); + state->dma_addr = dma_addr; + } else { + state = page_address(page); + dma_addr = state->dma_addr; } - state = page_address(page); - state->refcnt = 0; - state->dma_addr = dma_addr; dma_addr += sizeof(struct efx_rx_page_state); page_offset = sizeof(struct efx_rx_page_state); - split: - index = rx_queue->added_count & rx_queue->ptr_mask; - rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; - rx_buf->u.page = page; - rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; - rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; - rx_buf->flags = EFX_RX_BUF_PAGE; - ++rx_queue->added_count; - ++rx_queue->alloc_page_count; - ++state->refcnt; - - if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) { - /* Use the second half of the page */ + do { + index = rx_queue->added_count & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; + 
rx_buf->page = page; + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; + rx_buf->len = efx->rx_dma_len; + rx_buf->flags = 0; + ++rx_queue->added_count; get_page(page); - dma_addr += (PAGE_SIZE >> 1); - page_offset += (PAGE_SIZE >> 1); - ++count; - goto split; - } - } + dma_addr += efx->rx_page_buf_step; + page_offset += efx->rx_page_buf_step; + } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); + + rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; + } while (++count < efx->rx_pages_per_batch); return 0; } +/* Unmap a DMA-mapped page. This function is only called for the final RX + * buffer in a page. + */ static void efx_unmap_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf, - unsigned int used_len) + struct efx_rx_buffer *rx_buf) { - if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { - struct efx_rx_page_state *state; - - state = page_address(rx_buf->u.page); - if (--state->refcnt == 0) { - dma_unmap_page(&efx->pci_dev->dev, - state->dma_addr, - efx_rx_buf_size(efx), - DMA_FROM_DEVICE); - } else if (used_len) { - dma_sync_single_for_cpu(&efx->pci_dev->dev, - rx_buf->dma_addr, used_len, - DMA_FROM_DEVICE); - } - } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { - dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, - rx_buf->len, DMA_FROM_DEVICE); + struct page *page = rx_buf->page; + + if (page) { + struct efx_rx_page_state *state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, + state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); } } -static void efx_free_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf) +static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) { - if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { - __free_pages(rx_buf->u.page, efx->rx_buffer_order); - rx_buf->u.page = NULL; - } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { - dev_kfree_skb_any(rx_buf->u.skb); - rx_buf->u.skb = NULL; + if (rx_buf->page) { + put_page(rx_buf->page); + rx_buf->page = NULL; } } -static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf) +/* Attempt to recycle the page if there is an RX recycle ring; the page can + * only be added if this is the final RX buffer, to prevent pages being used in + * the descriptor ring and appearing in the recycle ring simultaneously. + */ +static void efx_recycle_rx_page(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf) { - efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0); - efx_free_rx_buffer(rx_queue->efx, rx_buf); -} + struct page *page = rx_buf->page; + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_nic *efx = rx_queue->efx; + unsigned index; -/* Attempt to resurrect the other receive buffer that used to share this page, - * which had previously been passed up to the kernel and freed. */ -static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf) -{ - struct efx_rx_page_state *state = page_address(rx_buf->u.page); - struct efx_rx_buffer *new_buf; - unsigned fill_level, index; - - /* +1 because efx_rx_packet() incremented removed_count. +1 because - * we'd like to insert an additional descriptor whilst leaving - * EFX_RXD_HEAD_ROOM for the non-recycle path */ - fill_level = (rx_queue->added_count - rx_queue->removed_count + 2); - if (unlikely(fill_level > rx_queue->max_fill)) { - /* We could place "state" on a list, and drain the list in - * efx_fast_push_rx_descriptors(). For now, this will do. 
*/ + /* Only recycle the page after processing the final buffer. */ + if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) return; - } - ++state->refcnt; - get_page(rx_buf->u.page); + index = rx_queue->page_add & rx_queue->page_ptr_mask; + if (rx_queue->page_ring[index] == NULL) { + unsigned read_index = rx_queue->page_remove & + rx_queue->page_ptr_mask; - index = rx_queue->added_count & rx_queue->ptr_mask; - new_buf = efx_rx_buffer(rx_queue, index); - new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); - new_buf->u.page = rx_buf->u.page; - new_buf->len = rx_buf->len; - new_buf->flags = EFX_RX_BUF_PAGE; - ++rx_queue->added_count; + /* The next slot in the recycle ring is available, but + * increment page_remove if the read pointer currently + * points here. + */ + if (read_index == index) + ++rx_queue->page_remove; + rx_queue->page_ring[index] = page; + ++rx_queue->page_add; + return; + } + ++rx_queue->page_recycle_full; + efx_unmap_rx_buffer(efx, rx_buf); + put_page(rx_buf->page); } -/* Recycle the given rx buffer directly back into the rx_queue. There is - * always room to add this buffer, because we've just popped a buffer. */ -static void efx_recycle_rx_buffer(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf) +static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) { - struct efx_nic *efx = channel->efx; - struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - struct efx_rx_buffer *new_buf; - unsigned index; - - rx_buf->flags &= EFX_RX_BUF_PAGE; - - if ((rx_buf->flags & EFX_RX_BUF_PAGE) && - efx->rx_buffer_len <= EFX_RX_HALF_PAGE && - page_count(rx_buf->u.page) == 1) - efx_resurrect_rx_buffer(rx_queue, rx_buf); + /* Release the page reference we hold for the buffer. */ + if (rx_buf->page) + put_page(rx_buf->page); + + /* If this is the last buffer in a page, unmap and free it. */ + if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { + efx_unmap_rx_buffer(rx_queue->efx, rx_buf); + efx_free_rx_buffer(rx_buf); + } + rx_buf->page = NULL; +} - index = rx_queue->added_count & rx_queue->ptr_mask; - new_buf = efx_rx_buffer(rx_queue, index); +/* Recycle the pages that are used by buffers that have just been received. 
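The recycle ring itself is a plain power-of-two FIFO: page_add and page_remove are free-running counters and page_ptr_mask folds them onto slots. With an 8-slot ring (page_ptr_mask = 7), for example, counter values 13 and 9 map to slots 13 & 7 = 5 and 9 & 7 = 1; the counters only ever increment, the mask supplies the wrap-around, and a full ring shows up as the target slot still holding a page, which is the page_recycle_full path above.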
*/ +static void efx_recycle_rx_buffers(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - memcpy(new_buf, rx_buf, sizeof(*new_buf)); - rx_buf->u.page = NULL; - ++rx_queue->added_count; + do { + efx_recycle_rx_page(channel, rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); } /** @@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, */ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) { - struct efx_channel *channel = efx_rx_queue_channel(rx_queue); - unsigned fill_level; + struct efx_nic *efx = rx_queue->efx; + unsigned int fill_level, batch_size; int space, rc = 0; /* Calculate current fill level, and exit if we don't need to fill */ @@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) rx_queue->min_fill = fill_level; } + batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; space = rx_queue->max_fill - fill_level; - EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH); + EFX_BUG_ON_PARANOID(space < batch_size); netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, "RX queue %d fast-filling descriptor ring from" - " level %d to level %d using %s allocation\n", + " level %d to level %d\n", efx_rx_queue_index(rx_queue), fill_level, - rx_queue->max_fill, - channel->rx_alloc_push_pages ? "page" : "skb"); + rx_queue->max_fill); + do { - if (channel->rx_alloc_push_pages) - rc = efx_init_rx_buffers_page(rx_queue); - else - rc = efx_init_rx_buffers_skb(rx_queue); + rc = efx_init_rx_buffers(rx_queue); if (unlikely(rc)) { /* Ensure that we don't leave the rx queue empty */ if (rx_queue->added_count == rx_queue->removed_count) efx_schedule_slow_fill(rx_queue); goto out; } - } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); + } while ((space -= batch_size) >= batch_size); netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, "RX queue %d fast-filled descriptor ring " @@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context) static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, - int len, bool *leak_packet) + int len) { struct efx_nic *efx = rx_queue->efx; unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; @@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, "RX event (0x%x > 0x%x+0x%x). Leaking\n", efx_rx_queue_index(rx_queue), len, max_len, efx->type->rx_buffer_padding); - /* If this buffer was skb-allocated, then the meta - * data at the end of the skb will be trashed. So - * we have no choice but to leak the fragment. - */ - *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE); efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); } else { if (net_ratelimit()) @@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, /* Pass a received packet up through GRO. GRO can handle pages * regardless of checksum state and skbs with a good checksum. 
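Continuing the two-buffers-per-page example, the refill unit in efx_fast_push_rx_descriptors() becomes batch_size = rx_pages_per_batch * rx_bufs_per_page = 4 * 2 = 8 descriptors, and the do/while loop keeps adding batches for as long as at least one more complete batch still fits below max_fill.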
*/ -static void efx_rx_packet_gro(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - const u8 *eh) +static void +efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh) { struct napi_struct *napi = &channel->napi_str; gro_result_t gro_result; + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; - if (rx_buf->flags & EFX_RX_BUF_PAGE) { - struct efx_nic *efx = channel->efx; - struct page *page = rx_buf->u.page; - struct sk_buff *skb; + skb = napi_get_frags(napi); + if (unlikely(!skb)) { + while (n_frags--) { + put_page(rx_buf->page); + rx_buf->page = NULL; + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + return; + } - rx_buf->u.page = NULL; + if (efx->net_dev->features & NETIF_F_RXHASH) + skb->rxhash = efx_rx_buf_hash(eh); + skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? + CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } - skb = napi_get_frags(napi); - if (!skb) { - put_page(page); - return; - } + skb->data_len = skb->len; + skb->truesize += n_frags * efx->rx_buffer_truesize; + + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + gro_result = napi_gro_frags(napi); + if (gro_result != GRO_DROP) + channel->irq_mod_score += 2; +} - if (efx->net_dev->features & NETIF_F_RXHASH) - skb->rxhash = efx_rx_buf_hash(eh); +/* Allocate and construct an SKB around page fragments */ +static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags, + u8 *eh, int hdr_len) +{ + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; - skb_fill_page_desc(skb, 0, page, - efx_rx_buf_offset(efx, rx_buf), rx_buf->len); + /* Allocate an SKB to store the headers */ + skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN); + if (unlikely(skb == NULL)) + return NULL; - skb->len = rx_buf->len; - skb->data_len = rx_buf->len; - skb->truesize += rx_buf->len; - skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len); - skb_record_rx_queue(skb, channel->rx_queue.core_index); + skb_reserve(skb, EFX_PAGE_SKB_ALIGN); + memcpy(__skb_put(skb, hdr_len), eh, hdr_len); - gro_result = napi_gro_frags(napi); - } else { - struct sk_buff *skb = rx_buf->u.skb; + /* Append the remaining page(s) onto the frag list */ + if (rx_buf->len > hdr_len) { + rx_buf->page_offset += hdr_len; + rx_buf->len -= hdr_len; - EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED)); - rx_buf->u.skb = NULL; - skb->ip_summed = CHECKSUM_UNNECESSARY; + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + skb->data_len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; - gro_result = napi_gro_receive(napi, skb); + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + } else { + __free_pages(rx_buf->page, efx->rx_buffer_order); + rx_buf->page = NULL; + n_frags = 0; } - if (gro_result == GRO_NORMAL) { - channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; - } else if (gro_result != GRO_DROP) { - channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO; - channel->irq_mod_score += 2; - } + skb->truesize += n_frags * efx->rx_buffer_truesize; + + /* Move past the ethernet header */ + skb->protocol = eth_type_trans(skb, efx->net_dev); + + return skb; } void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, - unsigned int len, u16 flags) + unsigned int n_frags, unsigned int len, u16 flags) { struct efx_nic *efx = rx_queue->efx; struct efx_channel *channel = efx_rx_queue_channel(rx_queue); struct efx_rx_buffer *rx_buf; - bool leak_packet = false; rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->flags |= flags; - /* This allows the refill path to post another buffer. - * EFX_RXD_HEAD_ROOM ensures that the slot we are using - * isn't overwritten yet. - */ - rx_queue->removed_count++; - - /* Validate the length encoded in the event vs the descriptor pushed */ - efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet); + /* Validate the number of fragments and completed length */ + if (n_frags == 1) { + efx_rx_packet__check_len(rx_queue, rx_buf, len); + } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || + unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) || + unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) || + unlikely(!efx->rx_scatter)) { + /* If this isn't an explicit discard request, either + * the hardware or the driver is broken. + */ + WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD)); + rx_buf->flags |= EFX_RX_PKT_DISCARD; + } netif_vdbg(efx, rx_status, efx->net_dev, - "RX queue %d received id %x at %llx+%x %s%s\n", + "RX queue %d received ids %x-%x len %d %s%s\n", efx_rx_queue_index(rx_queue), index, - (unsigned long long)rx_buf->dma_addr, len, + (index + n_frags - 1) & rx_queue->ptr_mask, len, (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); - /* Discard packet, if instructed to do so */ + /* Discard packet, if instructed to do so. Process the + * previous receive first. 
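The scatter sanity check above ties the completed length to the fragment count: a completion is only plausible when (n_frags - 1) * EFX_RX_USR_BUF_SIZE < len <= n_frags * EFX_RX_USR_BUF_SIZE. With the hypothetical 1824-byte buffer size used earlier, n_frags = 3 is accepted only for len between 3649 and 5472; anything outside that window, more than EFX_RX_MAX_FRAGS fragments, or scatter being disabled marks the packet EFX_RX_PKT_DISCARD.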
+ */ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { - if (unlikely(leak_packet)) - channel->n_skbuff_leaks++; - else - efx_recycle_rx_buffer(channel, rx_buf); - - /* Don't hold off the previous receive */ - rx_buf = NULL; - goto out; + efx_rx_flush_packet(channel); + put_page(rx_buf->page); + efx_recycle_rx_buffers(channel, rx_buf, n_frags); + return; } - /* Release and/or sync DMA mapping - assumes all RX buffers - * consumed in-order per RX queue + if (n_frags == 1) + rx_buf->len = len; + + /* Release and/or sync the DMA mapping - assumes all RX buffers + * consumed in-order per RX queue. */ - efx_unmap_rx_buffer(efx, rx_buf, len); + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); /* Prefetch nice and early so data will (hopefully) be in cache by * the time we look at it. */ - prefetch(efx_rx_buf_eh(efx, rx_buf)); + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->type->rx_buffer_hash_size; + rx_buf->len -= efx->type->rx_buffer_hash_size; + + if (n_frags > 1) { + /* Release/sync DMA mapping for additional fragments. + * Fix length for last fragment. + */ + unsigned int tail_frags = n_frags - 1; + + for (;;) { + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + if (--tail_frags == 0) + break; + efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE); + } + rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE; + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + } + + /* All fragments have been DMA-synced, so recycle buffers and pages. */ + rx_buf = efx_rx_buffer(rx_queue, index); + efx_recycle_rx_buffers(channel, rx_buf, n_frags); /* Pipeline receives so that we give time for packet headers to be * prefetched into cache. */ - rx_buf->len = len - efx->type->rx_buffer_hash_size; -out: - if (channel->rx_pkt) - __efx_rx_packet(channel, channel->rx_pkt); - channel->rx_pkt = rx_buf; + efx_rx_flush_packet(channel); + channel->rx_pkt_n_frags = n_frags; + channel->rx_pkt_index = index; } -static void efx_rx_deliver(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf) +static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) { struct sk_buff *skb; + u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS); - /* We now own the SKB */ - skb = rx_buf->u.skb; - rx_buf->u.skb = NULL; + skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); + if (unlikely(skb == NULL)) { + efx_free_rx_buffer(rx_buf); + return; + } + skb_record_rx_queue(skb, channel->rx_queue.core_index); /* Set the SKB flags */ skb_checksum_none_assert(skb); - /* Record the rx_queue */ - skb_record_rx_queue(skb, channel->rx_queue.core_index); - - /* Pass the packet up */ if (channel->type->receive_skb) - channel->type->receive_skb(channel, skb); - else - netif_receive_skb(skb); + if (channel->type->receive_skb(channel, skb)) + return; - /* Update allocation strategy method */ - channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; + /* Pass the packet up */ + netif_receive_skb(skb); } /* Handle a received packet. Second half: Touches packet payload. 
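When a packet goes up as a normal skb rather than through GRO, efx_rx_deliver() linearises at most EFX_SKB_HEADERS (64) bytes and leaves the payload in page fragments: a 1400-byte completion yields hdr_len = min(1400, 64) = 64 bytes of linear header plus 1336 bytes of frag data, while a frame of 64 bytes or less is copied whole and its page freed, the branch in efx_rx_mk_skb() that drops n_frags to 0.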
*/ -void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf) +void __efx_rx_packet(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; - u8 *eh = efx_rx_buf_eh(efx, rx_buf); + struct efx_rx_buffer *rx_buf = + efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); + u8 *eh = efx_rx_buf_va(rx_buf); /* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here */ if (unlikely(efx->loopback_selftest)) { efx_loopback_rx_packet(efx, eh, rx_buf->len); - efx_free_rx_buffer(efx, rx_buf); - return; - } - - if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) { - struct sk_buff *skb = rx_buf->u.skb; - - prefetch(skb_shinfo(skb)); - - skb_reserve(skb, efx->type->rx_buffer_hash_size); - skb_put(skb, rx_buf->len); - - if (efx->net_dev->features & NETIF_F_RXHASH) - skb->rxhash = efx_rx_buf_hash(eh); - - /* Move past the ethernet header. rx_buf->data still points - * at the ethernet header */ - skb->protocol = eth_type_trans(skb, efx->net_dev); - - skb_record_rx_queue(skb, channel->rx_queue.core_index); + efx_free_rx_buffer(rx_buf); + goto out; } if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) && - !channel->type->receive_skb) - efx_rx_packet_gro(channel, rx_buf, eh); + if (!channel->type->receive_skb) + efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else - efx_rx_deliver(channel, rx_buf); -} - -void efx_rx_strategy(struct efx_channel *channel) -{ - enum efx_rx_alloc_method method = rx_alloc_method; - - if (channel->type->receive_skb) { - channel->rx_alloc_push_pages = false; - return; - } - - /* Only makes sense to use page based allocation if GRO is enabled */ - if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { - method = RX_ALLOC_METHOD_SKB; - } else if (method == RX_ALLOC_METHOD_AUTO) { - /* Constrain the rx_alloc_level */ - if (channel->rx_alloc_level < 0) - channel->rx_alloc_level = 0; - else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX) - channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX; - - /* Decide on the allocation method */ - method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ? 
- RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB); - } - - /* Push the option */ - channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE); + efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); +out: + channel->rx_pkt_n_frags = 0; } int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) @@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) kfree(rx_queue->buffer); rx_queue->buffer = NULL; } + return rc; } +void efx_init_rx_recycle_ring(struct efx_nic *efx, + struct efx_rx_queue *rx_queue) +{ + unsigned int bufs_in_recycle_ring, page_ring_size; + + /* Set the RX recycle ring size */ +#ifdef CONFIG_PPC64 + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; +#else + if (efx->pci_dev->dev.iommu_group) + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; + else + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU; +#endif /* CONFIG_PPC64 */ + + page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / + efx->rx_bufs_per_page); + rx_queue->page_ring = kcalloc(page_ring_size, + sizeof(*rx_queue->page_ring), GFP_KERNEL); + rx_queue->page_ptr_mask = page_ring_size - 1; +} + void efx_init_rx_queue(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; @@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->notified_count = 0; rx_queue->removed_count = 0; rx_queue->min_fill = -1U; + efx_init_rx_recycle_ring(efx, rx_queue); + + rx_queue->page_remove = 0; + rx_queue->page_add = rx_queue->page_ptr_mask + 1; + rx_queue->page_recycle_count = 0; + rx_queue->page_recycle_failed = 0; + rx_queue->page_recycle_full = 0; /* Initialise limit fields */ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; - max_trigger = max_fill - EFX_RX_BATCH; + max_trigger = + max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; if (rx_refill_threshold != 0) { trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; if (trigger > max_trigger) @@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) { int i; + struct efx_nic *efx = rx_queue->efx; struct efx_rx_buffer *rx_buf; netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, @@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) del_timer_sync(&rx_queue->slow_fill); efx_nic_fini_rx(rx_queue); - /* Release RX buffers NB start at index 0 not current HW ptr */ + /* Release RX buffers from the current read ptr to the write ptr */ if (rx_queue->buffer) { - for (i = 0; i <= rx_queue->ptr_mask; i++) { - rx_buf = efx_rx_buffer(rx_queue, i); + for (i = rx_queue->removed_count; i < rx_queue->added_count; + i++) { + unsigned index = i & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, index); efx_fini_rx_buffer(rx_queue, rx_buf); } } + + /* Unmap and release the pages in the recycle ring. Remove the ring. 
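Putting numbers on efx_init_rx_recycle_ring(), again assuming two buffers per page: with an IOMMU present (or always on PPC64) the ring is sized for EFX_RECYCLE_RING_SIZE_IOMMU = 4096 buffers, giving roundup_pow_of_two(4096 / 2) = 2048 page slots and page_ptr_mask = 2047; without an IOMMU it is 2 * EFX_RX_PREFERRED_BATCH = 16 buffers, giving 8 slots and a mask of 7. efx_init_rx_queue() then starts page_remove at 0 and page_add one full ring ahead, at page_ptr_mask + 1.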
*/ + for (i = 0; i <= rx_queue->page_ptr_mask; i++) { + struct page *page = rx_queue->page_ring[i]; + struct efx_rx_page_state *state; + + if (page == NULL) + continue; + + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + } + kfree(rx_queue->page_ring); + rx_queue->page_ring = NULL; } void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) @@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) } -module_param(rx_alloc_method, int, 0644); -MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); - module_param(rx_refill_threshold, uint, 0444); MODULE_PARM_DESC(rx_refill_threshold, "RX descriptor ring refill threshold (%)"); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index ba40f67e4f05..51669244d154 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -202,7 +202,7 @@ out: static enum reset_type siena_map_reset_reason(enum reset_type reason) { - return RESET_TYPE_ALL; + return RESET_TYPE_RECOVER_OR_ALL; } static int siena_map_reset_flags(u32 *flags) @@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) return efx_mcdi_reset_port(efx); } +#ifdef CONFIG_EEH +/* When a PCI device is isolated from the bus, a subsequent MMIO read is + * required for the kernel EEH mechanisms to notice. As the Solarflare driver + * was written to minimise MMIO read (for latency) then a periodic call to check + * the EEH status of the device is required so that device recovery can happen + * in a timely fashion. + */ +static void siena_monitor(struct efx_nic *efx) +{ + struct eeh_dev *eehdev = + of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); + + eeh_dev_check_failure(eehdev); +} +#endif + static int siena_probe_nvconfig(struct efx_nic *efx) { u32 caps = 0; @@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx) EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE, + EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); /* Set hash key for IPv4 */ @@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = { .init = siena_init_nic, .dimension_resources = siena_dimension_resources, .fini = efx_port_dummy_op_void, +#ifdef CONFIG_EEH + .monitor = siena_monitor, +#else .monitor = NULL, +#endif .map_reset_reason = siena_map_reset_reason, .map_reset_flags = siena_map_reset_flags, .reset = siena_reset_hw, @@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = { .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, + .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy * interrupt handler only supports 32 diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index efca14eaefa9..e45829628d5f 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1841,15 +1841,12 @@ refill_rx_ring: entry = sis_priv->dirty_rx % NUM_RX_DESC; if (sis_priv->rx_skbuff[entry] == NULL) { - if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) { + skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE); + if (skb == NULL) { /* not enough memory for skbuff, this makes a * "hole" on the buffer ring, it 
is not clear * how the hardware will react to this kind * of degenerated buffer */ - if (netif_msg_rx_err(sis_priv)) - printk(KERN_INFO "%s: Memory squeeze, " - "deferring packet.\n", - net_dev->name); net_dev->stats.rx_dropped++; break; } diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index 50823da9dc1e..e85c2e7e8246 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -1223,9 +1223,7 @@ static void smc_rcv(struct net_device *dev) dev->stats.multicast++; skb = netdev_alloc_skb(dev, packet_length + 5); - if ( skb == NULL ) { - printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n"); dev->stats.rx_dropped++; goto done; } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 591650a8de38..dfbf978315df 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -465,8 +465,6 @@ static inline void smc_rcv(struct net_device *dev) */ skb = netdev_alloc_skb(dev, packet_len); if (unlikely(skb == NULL)) { - printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", - dev->name); SMC_WAIT_MMU_BUSY(lp); SMC_SET_MMU_CMD(lp, MC_RELEASE); dev->stats.rx_dropped++; diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index d457fa2d7509..ffa5c4ad1210 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -848,10 +848,8 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) BUG_ON(pd->rx_buffers[index].skb); BUG_ON(pd->rx_buffers[index].mapping); - if (unlikely(!skb)) { - smsc_warn(RX_ERR, "Failed to allocate new skb!"); + if (unlikely(!skb)) return -ENOMEM; - } mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), PKT_BUF_SZ, PCI_DMA_FROMDEVICE); diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 49bf3e2eb652..8182591bc187 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -414,7 +414,7 @@ static void qe_rx(struct sunqe *qep) struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; - int elem = qep->rx_new, drops = 0; + int elem = qep->rx_new; u32 flags; this = &rxbase[elem]; @@ -436,7 +436,6 @@ static void qe_rx(struct sunqe *qep) } else { skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { - drops++; dev->stats.rx_dropped++; } else { skb_reserve(skb, 2); @@ -456,8 +455,6 @@ static void qe_rx(struct sunqe *qep) this = &rxbase[elem]; } qep->rx_new = elem; - if (drops) - printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name); } static void qe_tx_reclaim(struct sunqe *qep); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index e15cc71b826d..e8824cea093b 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1102,10 +1102,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f) dno = bdx_rxdb_available(db) - 1; while (dno > 0) { skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN); - if (!skb) { - pr_err("NO MEM: netdev_alloc_skb failed\n"); + if (!skb) break; - } + skb_reserve(skb, NET_IP_ALIGN); idx = bdx_rxdb_alloc_elem(db); diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 22725386c5de..bdda36f8e541 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -1911,10 +1911,8 @@ static void tlan_reset_lists(struct net_device *dev) 
list->frame_size = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if (!skb) { - netdev_err(dev, "Out of memory for received data\n"); + if (!skb) break; - } list->buffer[0].address = pci_map_single(priv->pci_dev, skb->data, diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 9fc2ada4c3c2..5ac43e4ace25 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -273,11 +273,9 @@ static int temac_dma_bd_init(struct net_device *ndev) skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); - - if (skb == 0) { - dev_err(&ndev->dev, "alloc_skb error %d\n", i); + if (!skb) goto out; - } + lp->rx_skb[i] = skb; /* returns physical address of skb->data */ lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, @@ -789,9 +787,7 @@ static void ll_temac_recv(struct net_device *ndev) new_skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); - - if (new_skb == 0) { - dev_err(&ndev->dev, "no memory for new sk_buff\n"); + if (!new_skb) { spin_unlock_irqrestore(&lp->rx_lock, flags); return; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 278c9db3b5b8..397d4a6a1f30 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -235,10 +235,8 @@ static int axienet_dma_bd_init(struct net_device *ndev) ((i + 1) % RX_BD_NUM); skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); - if (!skb) { - dev_err(&ndev->dev, "alloc_skb error %d\n", i); + if (!skb) goto out; - } lp->rx_bd_v[i].sw_id_offset = (u32) skb; lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, @@ -777,10 +775,9 @@ static void axienet_recv(struct net_device *ndev) packets++; new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); - if (!new_skb) { - dev_err(&ndev->dev, "no memory for new sk_buff\n"); + if (!new_skb) return; - } + cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, lp->max_frm_size, DMA_FROM_DEVICE); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 98e09d0d3ce2..76210abf2e9b 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1041,7 +1041,6 @@ xirc2ps_interrupt(int irq, void *dev_id) /* 1 extra so we can use insw */ skb = netdev_alloc_skb(dev, pktlen + 3); if (!skb) { - pr_notice("low memory, packet dropped (size=%u)\n", pktlen); dev->stats.rx_dropped++; } else { /* okay get the packet */ skb_reserve(skb, 2); diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index ec40ba882f61..ff2e45e9cb54 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -159,7 +159,7 @@ static int lxt973a2_update_link(struct phy_device *phydev) return 0; } -int lxt973a2_read_status(struct phy_device *phydev) +static int lxt973a2_read_status(struct phy_device *phydev) { int adv; int err; diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 2585c383e623..3492b5391273 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -61,7 +61,7 @@ MODULE_DESCRIPTION("Vitesse PHY driver"); MODULE_AUTHOR("Kriston Carson"); MODULE_LICENSE("GPL"); -int vsc824x_add_skew(struct phy_device *phydev) +static int vsc824x_add_skew(struct phy_device *phydev) { int err; int extcon; @@ -81,7 +81,6 @@ int vsc824x_add_skew(struct phy_device 
*phydev) return err; } -EXPORT_SYMBOL(vsc824x_add_skew); static int vsc824x_config_init(struct phy_device *phydev) { diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig index c3011af68e91..c853d84fd99f 100644 --- a/drivers/net/team/Kconfig +++ b/drivers/net/team/Kconfig @@ -37,6 +37,18 @@ config NET_TEAM_MODE_ROUNDROBIN To compile this team mode as a module, choose M here: the module will be called team_mode_roundrobin. +config NET_TEAM_MODE_RANDOM + tristate "Random mode support" + depends on NET_TEAM + ---help--- + Basic mode where port used for transmitting packets is selected + randomly. + + All added ports are setup to have team's device address. + + To compile this team mode as a module, choose M here: the module + will be called team_mode_random. + config NET_TEAM_MODE_ACTIVEBACKUP tristate "Active-backup mode support" depends on NET_TEAM diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile index 975763014e5a..c57e85889751 100644 --- a/drivers/net/team/Makefile +++ b/drivers/net/team/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_NET_TEAM) += team.o obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o +obj-$(CONFIG_NET_TEAM_MODE_RANDOM) += team_mode_random.o obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index bf3419297875..621c1bddeee9 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -73,11 +73,24 @@ static int team_port_set_orig_dev_addr(struct team_port *port) return __set_port_dev_addr(port->dev, port->orig.dev_addr); } -int team_port_set_team_dev_addr(struct team_port *port) +static int team_port_set_team_dev_addr(struct team *team, + struct team_port *port) +{ + return __set_port_dev_addr(port->dev, team->dev->dev_addr); +} + +int team_modeop_port_enter(struct team *team, struct team_port *port) +{ + return team_port_set_team_dev_addr(team, port); +} +EXPORT_SYMBOL(team_modeop_port_enter); + +void team_modeop_port_change_dev_addr(struct team *team, + struct team_port *port) { - return __set_port_dev_addr(port->dev, port->team->dev->dev_addr); + team_port_set_team_dev_addr(team, port); } -EXPORT_SYMBOL(team_port_set_team_dev_addr); +EXPORT_SYMBOL(team_modeop_port_change_dev_addr); static void team_refresh_port_linkup(struct team_port *port) { @@ -490,9 +503,9 @@ static bool team_dummy_transmit(struct team *team, struct sk_buff *skb) return false; } -rx_handler_result_t team_dummy_receive(struct team *team, - struct team_port *port, - struct sk_buff *skb) +static rx_handler_result_t team_dummy_receive(struct team *team, + struct team_port *port, + struct sk_buff *skb) { return RX_HANDLER_ANOTHER; } diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index c5db428e73fa..c366cd299c06 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -46,20 +46,10 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) return sum_ret; } -static int bc_port_enter(struct team *team, struct team_port *port) -{ - return team_port_set_team_dev_addr(port); -} - -static void bc_port_change_dev_addr(struct team *team, struct team_port *port) -{ - team_port_set_team_dev_addr(port); -} - static const struct team_mode_ops bc_mode_ops = { .transmit = bc_transmit, - .port_enter = bc_port_enter, - .port_change_dev_addr = bc_port_change_dev_addr, + 
.port_enter = team_modeop_port_enter, + .port_change_dev_addr = team_modeop_port_change_dev_addr, }; static const struct team_mode bc_mode = { diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c new file mode 100644 index 000000000000..9eabfaa22f3e --- /dev/null +++ b/drivers/net/team/team_mode_random.c @@ -0,0 +1,71 @@ +/* + * drivers/net/team/team_mode_random.c - Random mode for team + * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/reciprocal_div.h> +#include <linux/if_team.h> + +static u32 random_N(unsigned int N) +{ + return reciprocal_divide(random32(), N); +} + +static bool rnd_transmit(struct team *team, struct sk_buff *skb) +{ + struct team_port *port; + int port_index; + + port_index = random_N(team->en_port_count); + port = team_get_port_by_index_rcu(team, port_index); + port = team_get_first_port_txable_rcu(team, port); + if (unlikely(!port)) + goto drop; + if (team_dev_queue_xmit(team, port, skb)) + return false; + return true; + +drop: + dev_kfree_skb_any(skb); + return false; +} + +static const struct team_mode_ops rnd_mode_ops = { + .transmit = rnd_transmit, + .port_enter = team_modeop_port_enter, + .port_change_dev_addr = team_modeop_port_change_dev_addr, +}; + +static const struct team_mode rnd_mode = { + .kind = "random", + .owner = THIS_MODULE, + .ops = &rnd_mode_ops, +}; + +static int __init rnd_init_module(void) +{ + return team_mode_register(&rnd_mode); +} + +static void __exit rnd_cleanup_module(void) +{ + team_mode_unregister(&rnd_mode); +} + +module_init(rnd_init_module); +module_exit(rnd_cleanup_module); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); +MODULE_DESCRIPTION("Random mode for team"); +MODULE_ALIAS("team-mode-random"); diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index 105135aa8f05..d268e4de781b 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -25,26 +25,6 @@ static struct rr_priv *rr_priv(struct team *team) return (struct rr_priv *) &team->mode_priv; } -static struct team_port *__get_first_port_up(struct team *team, - struct team_port *port) -{ - struct team_port *cur; - - if (team_port_txable(port)) - return port; - cur = port; - list_for_each_entry_continue_rcu(cur, &team->port_list, list) - if (team_port_txable(port)) - return cur; - list_for_each_entry_rcu(cur, &team->port_list, list) { - if (cur == port) - break; - if (team_port_txable(port)) - return cur; - } - return NULL; -} - static bool rr_transmit(struct team *team, struct sk_buff *skb) { struct team_port *port; @@ -52,7 +32,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb) port_index = rr_priv(team)->sent_packets++ % team->en_port_count; port = team_get_port_by_index_rcu(team, port_index); - port = __get_first_port_up(team, port); + port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; if (team_dev_queue_xmit(team, port, skb)) @@ -64,20 +44,10 @@ drop: return false; } -static int rr_port_enter(struct team *team, struct team_port *port) -{ - return 
team_port_set_team_dev_addr(port); -} - -static void rr_port_change_dev_addr(struct team *team, struct team_port *port) -{ - team_port_set_team_dev_addr(port); -} - static const struct team_mode_ops rr_mode_ops = { .transmit = rr_transmit, - .port_enter = rr_port_enter, - .port_change_dev_addr = rr_port_change_dev_addr, + .port_enter = team_modeop_port_enter, + .port_change_dev_addr = team_modeop_port_change_dev_addr, }; static const struct team_mode rr_mode = { diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7cee7a3068ec..db0df07c18dc 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -820,6 +820,20 @@ static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb) return (((u64) hash * range) >> 32) + vxlan->port_min; } +static int handle_offloads(struct sk_buff *skb) +{ + if (skb_is_gso(skb)) { + int err = skb_unclone(skb, GFP_ATOMIC); + if (unlikely(err)) + return err; + + skb_shinfo(skb)->gso_type |= (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP); + } else if (skb->ip_summed != CHECKSUM_PARTIAL) + skb->ip_summed = CHECKSUM_NONE; + + return 0; +} + /* Transmit local packets over Vxlan * * Outer IP header inherits ECN and DF from inner header. @@ -841,7 +855,6 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) __u16 src_port; __be16 df = 0; __u8 tos, ttl; - int err; bool did_rsc = false; const struct vxlan_fdb *f; @@ -965,22 +978,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) vxlan_set_owner(dev, skb); - /* See iptunnel_xmit() */ - if (skb->ip_summed != CHECKSUM_PARTIAL) - skb->ip_summed = CHECKSUM_NONE; - - err = ip_local_out(skb); - if (likely(net_xmit_eval(err) == 0)) { - struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats); + if (handle_offloads(skb)) + goto drop; - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += pkt_len; - u64_stats_update_end(&stats->syncp); - } else { - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - } + iptunnel_xmit(skb, dev); return NETDEV_TX_OK; drop: @@ -1189,8 +1190,10 @@ static void vxlan_setup(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= NETIF_F_RXCSUM; + dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/include/linux/if_team.h b/include/linux/if_team.h index cfd21e3d5506..4474557904f6 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -112,6 +112,10 @@ struct team_mode_ops { void (*port_disabled)(struct team *team, struct team_port *port); }; +extern int team_modeop_port_enter(struct team *team, struct team_port *port); +extern void team_modeop_port_change_dev_addr(struct team *team, + struct team_port *port); + enum team_option_type { TEAM_OPTION_TYPE_U32, TEAM_OPTION_TYPE_STRING, @@ -236,7 +240,26 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team, return NULL; } -extern int team_port_set_team_dev_addr(struct team_port *port); +static inline struct team_port * +team_get_first_port_txable_rcu(struct team *team, struct team_port *port) +{ + struct team_port *cur; + + if (likely(team_port_txable(port))) + return port; + cur = port; + list_for_each_entry_continue_rcu(cur, &team->port_list, list) + if (team_port_txable(port)) + return cur; + list_for_each_entry_rcu(cur, &team->port_list, list) { + if (cur 
== port) + break; + if (team_port_txable(port)) + return cur; + } + return NULL; +} + extern int team_options_register(struct team *team, const struct team_option *option, size_t option_count); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 3dd39340430e..f5e797c0c2a4 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -42,9 +42,9 @@ enum { NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ - /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */ - NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */ - = NETIF_F_GSO_LAST, + NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ + /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ + NETIF_F_GSO_UDP_TUNNEL_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -103,6 +103,7 @@ enum { #define NETIF_F_RXFCS __NETIF_F(RXFCS) #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GRE_GSO __NETIF_F(GSO_GRE) +#define NETIF_F_UDP_TUNNEL __NETIF_F(UDP_TUNNEL) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b3d00fa4b314..e1ebeffa6b35 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1475,6 +1475,11 @@ static inline void *netdev_priv(const struct net_device *dev) */ #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) +/* Default NAPI poll() weight + * Device drivers are strongly advised to not use bigger value + */ +#define NAPI_POLL_WEIGHT 64 + /** * netif_napi_add - initialize a napi context * @dev: network device @@ -2678,6 +2683,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) { return __skb_gso_segment(skb, features, true); } +__be16 skb_network_protocol(struct sk_buff *skb); + +static inline bool can_checksum_protocol(netdev_features_t features, + __be16 protocol) +{ + return ((features & NETIF_F_GEN_CSUM) || + ((features & NETIF_F_V4_CSUM) && + protocol == htons(ETH_P_IP)) || + ((features & NETIF_F_V6_CSUM) && + protocol == htons(ETH_P_IPV6)) || + ((features & NETIF_F_FCOE_CRC) && + protocol == htons(ETH_P_FCOE))); +} #ifdef CONFIG_BUG extern void netdev_rx_csum_fault(struct net_device *dev); diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 489dd7bb28ec..f28544b2f9af 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -69,6 +69,15 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, int idx); +extern int ndo_dflt_fdb_add(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 flags); +extern int ndo_dflt_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u16 mode); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 821c7f45d2a7..eb2106fe3bb4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -316,6 +316,8 @@ enum { SKB_GSO_FCOE = 1 << 5, SKB_GSO_GRE = 1 << 6, + + SKB_GSO_UDP_TUNNEL = 1 << 7, }; #if BITS_PER_LONG > 32 @@ -387,6 +389,7 @@ typedef unsigned char *sk_buff_data_t; * @vlan_tci: vlan tag control information * @inner_transport_header: Inner transport layer header (encapsulation) * 
@inner_network_header: Network layer header (encapsulation) + * @inner_mac_header: Link layer header (encapsulation) * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header @@ -505,6 +508,7 @@ struct sk_buff { sk_buff_data_t inner_transport_header; sk_buff_data_t inner_network_header; + sk_buff_data_t inner_mac_header; sk_buff_data_t transport_header; sk_buff_data_t network_header; sk_buff_data_t mac_header; @@ -1466,6 +1470,7 @@ static inline void skb_reserve(struct sk_buff *skb, int len) static inline void skb_reset_inner_headers(struct sk_buff *skb) { + skb->inner_mac_header = skb->mac_header; skb->inner_network_header = skb->network_header; skb->inner_transport_header = skb->transport_header; } @@ -1511,6 +1516,22 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb, skb->inner_network_header += offset; } +static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->inner_mac_header; +} + +static inline void skb_reset_inner_mac_header(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->data - skb->head; +} + +static inline void skb_set_inner_mac_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_mac_header(skb); + skb->inner_mac_header += offset; +} static inline bool skb_transport_header_was_set(const struct sk_buff *skb) { return skb->transport_header != ~0U; @@ -1604,6 +1625,21 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb, skb->inner_network_header = skb->data + offset; } +static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) +{ + return skb->inner_mac_header; +} + +static inline void skb_reset_inner_mac_header(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->data; +} + +static inline void skb_set_inner_mac_header(struct sk_buff *skb, + const int offset) +{ + skb->inner_mac_header = skb->data + offset; +} static inline bool skb_transport_header_was_set(const struct sk_buff *skb) { return skb->transport_header != NULL; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index f28408c07dc2..515c3746b675 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -361,10 +361,6 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif - /* Few sockets in timewait have cookies; in that case, then this - * object holds a reference to them (tw_cookie_values->kref). - */ - struct tcp_cookie_values *tw_cookie_values; }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) diff --git a/include/linux/vm_sockets.h b/include/linux/vm_sockets.h new file mode 100644 index 000000000000..0805eecba8f7 --- /dev/null +++ b/include/linux/vm_sockets.h @@ -0,0 +1,23 @@ +/* + * VMware vSockets Driver + * + * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation version 2 and no later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
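A minimal sketch of how the inner header markers above (now including inner_mac_header) are meant to be used by an encapsulating transmit path; example_begin_encap() is hypothetical, but the ipip.c hunk further down in this patch follows the same pattern.

static void example_begin_encap(struct sk_buff *skb)
{
	if (!skb->encapsulation) {
		/* Snapshot the inner MAC/network/transport offsets before
		 * any outer headers are pushed.
		 */
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}
	/* ...then push the outer (UDP/)IP header and reset the outer
	 * mac/network/transport headers as usual.
	 */
}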
+ */ + +#ifndef _VM_SOCKETS_H +#define _VM_SOCKETS_H + +#include <uapi/linux/vm_sockets.h> + +int vm_sockets_get_local_cid(void); + +#endif /* _VM_SOCKETS_H */ diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index e03047f7090b..ebdef7f60862 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -68,4 +68,24 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); +static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + int pkt_len, err; + + nf_reset(skb); + pkt_len = skb->len; + err = ip6_local_out(skb); + + if (net_xmit_eval(err) == 0) { + struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats); + u64_stats_update_begin(&tstats->syncp); + tstats->tx_bytes += pkt_len; + tstats->tx_packets++; + u64_stats_update_end(&tstats->syncp); + } else { + stats->tx_errors++; + stats->tx_aborted_errors++; + } +} #endif diff --git a/include/net/ipip.h b/include/net/ipip.h index fd19625ff99d..0c046e3bca05 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -51,13 +51,10 @@ struct ip_tunnel_prl_entry { static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev) { int err; - struct iphdr *iph = ip_hdr(skb); int pkt_len = skb->len - skb_transport_offset(skb); struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats); nf_reset(skb); - skb->ip_summed = CHECKSUM_NONE; - ip_select_ident(iph, skb_dst(skb), NULL); err = ip_local_out(skb); if (likely(net_xmit_eval(err) == 0)) { diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 64d12e77719a..42ef6abf25c3 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -217,7 +217,7 @@ struct ipv6_txoptions { }; struct ip6_flowlabel { - struct ip6_flowlabel *next; + struct ip6_flowlabel __rcu *next; __be32 label; atomic_t users; struct in6_addr dst; @@ -238,9 +238,9 @@ struct ip6_flowlabel { #define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) struct ipv6_fl_socklist { - struct ipv6_fl_socklist *next; - struct ip6_flowlabel *fl; - struct rcu_head rcu; + struct ipv6_fl_socklist __rcu *next; + struct ip6_flowlabel *fl; + struct rcu_head rcu; }; extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); @@ -320,6 +320,18 @@ static inline int ipv6_addr_src_scope(const struct in6_addr *addr) return __ipv6_addr_src_scope(__ipv6_addr_type(addr)); } +static inline bool __ipv6_addr_needs_scope_id(int type) +{ + return type & IPV6_ADDR_LINKLOCAL || + (type & IPV6_ADDR_MULTICAST && + (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL))); +} + +static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface) +{ + return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0; +} + static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2) { return memcmp(a1, a2, sizeof(struct in6_addr)); diff --git a/include/net/tcp.h b/include/net/tcp.h index cf0694d4ad60..a2baa5e4ba31 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1030,50 +1030,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) #endif } -/* Packet is added to VJ-style prequeue for processing in process - * context, if a reader task is waiting. Apparently, this exciting - * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) - * failed somewhere. Latency? Burstiness? Well, at least now we will - * see, why it failed. 8)8) --ANK - * - * NOTE: is this not too big to inline? 
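With the iptunnel_xmit()/ip6tunnel_xmit() helpers above, the tail of a tunnel ndo_start_xmit() collapses to one call; a sketch, with example_tnl_xmit() standing in for a real driver (the ip_gre.c, ipip.c, ip6_gre.c and ip6_tunnel.c hunks below do exactly this).

static netdev_tx_t example_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...route lookup and outer IPv4 header construction... */

	/* nf_reset(), ip_local_out() and per-CPU tx stats in one place */
	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;
}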
- */ -static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (sysctl_tcp_low_latency || !tp->ucopy.task) - return false; - - if (skb->len <= tcp_hdrlen(skb) && - skb_queue_len(&tp->ucopy.prequeue) == 0) - return false; - - __skb_queue_tail(&tp->ucopy.prequeue, skb); - tp->ucopy.memory += skb->truesize; - if (tp->ucopy.memory > sk->sk_rcvbuf) { - struct sk_buff *skb1; - - BUG_ON(sock_owned_by_user(sk)); - - while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { - sk_backlog_rcv(sk, skb1); - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPPREQUEUEDROPPED); - } - - tp->ucopy.memory = 0; - } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { - wake_up_interruptible_sync_poll(sk_sleep(sk), - POLLIN | POLLRDNORM | POLLRDBAND); - if (!inet_csk_ack_scheduled(sk)) - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, - (3 * tcp_rto_min(sk)) / 4, - TCP_RTO_MAX); - } - return true; -} - +extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); #undef STATE_TRACE diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 32aef0a439ef..dbd71b0c7d8c 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -348,6 +348,7 @@ enum { TCA_HTB_INIT, TCA_HTB_CTAB, TCA_HTB_RTAB, + TCA_HTB_DIRECT_QLEN, __TCA_HTB_MAX, }; diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h index df91301847ec..b4ed5d895699 100644 --- a/include/uapi/linux/vm_sockets.h +++ b/include/uapi/linux/vm_sockets.h @@ -13,12 +13,10 @@ * more details. */ -#ifndef _VM_SOCKETS_H_ -#define _VM_SOCKETS_H_ +#ifndef _UAPI_VM_SOCKETS_H +#define _UAPI_VM_SOCKETS_H -#if !defined(__KERNEL__) -#include <sys/socket.h> -#endif +#include <linux/socket.h> /* Option name for STREAM socket buffer size. Use as the option name in * setsockopt(3) or getsockopt(3) to set or get an unsigned long long that @@ -137,14 +135,13 @@ #define VM_SOCKETS_VERSION_MINOR(_v) (((_v) & 0x0000FFFF)) /* Address structure for vSockets. The address family should be set to - * whatever vmci_sock_get_af_value_fd() returns. The structure members should - * all align on their natural boundaries without resorting to compiler packing - * directives. The total size of this structure should be exactly the same as - * that of struct sockaddr. + * AF_VSOCK. The structure members should all align on their natural + * boundaries without resorting to compiler packing directives. The total size + * of this structure should be exactly the same as that of struct sockaddr. 
*/ struct sockaddr_vm { - sa_family_t svm_family; + __kernel_sa_family_t svm_family; unsigned short svm_reserved1; unsigned int svm_port; unsigned int svm_cid; @@ -156,8 +153,4 @@ struct sockaddr_vm { #define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9) -#if defined(__KERNEL__) -int vm_sockets_get_local_cid(void); -#endif - -#endif +#endif /* _UAPI_VM_SOCKETS_H */ diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 923fbeaf7afd..81f2389f78eb 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1369,7 +1369,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, return -EINVAL; if (iph->protocol != IPPROTO_IGMP) { - if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP) + if (!ipv4_is_local_multicast(iph->daddr)) BR_INPUT_SKB_CB(skb)->mrouters_only = 1; return 0; } diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 21760f008974..df6d56d8689a 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -301,10 +301,11 @@ static void dev_flowctrl(struct net_device *dev, int on) } void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, - struct cflayer *link_support, int head_room, - struct cflayer **layer, int (**rcv_func)( - struct sk_buff *, struct net_device *, - struct packet_type *, struct net_device *)) + struct cflayer *link_support, int head_room, + struct cflayer **layer, + int (**rcv_func)(struct sk_buff *, struct net_device *, + struct packet_type *, + struct net_device *)) { struct caif_device_entry *caifd; enum cfcnfg_phy_preference pref; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 095259f83902..1d337e02bc63 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -197,8 +197,8 @@ static void cfsk_put(struct cflayer *layr) /* Packet Control Callback function called from CAIF */ static void caif_ctrl_cb(struct cflayer *layr, - enum caif_ctrlcmd flow, - int phyid) + enum caif_ctrlcmd flow, + int phyid) { struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); switch (flow) { @@ -274,7 +274,7 @@ static void caif_check_flow_release(struct sock *sk) * changed locking, address handling and added MSG_TRUNC. */ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t len, int flags) + struct msghdr *m, size_t len, int flags) { struct sock *sk = sock->sk; @@ -346,8 +346,8 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) * changed locking calls, changed address handling. */ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, - int flags) + struct msghdr *msg, size_t size, + int flags) { struct sock *sk = sock->sk; int copied = 0; @@ -462,7 +462,7 @@ out: * CAIF flow-on and sock_writable. */ static long caif_wait_for_flow_on(struct caifsock *cf_sk, - int wait_writeable, long timeo, int *err) + int wait_writeable, long timeo, int *err) { struct sock *sk = &cf_sk->sk; DEFINE_WAIT(wait); @@ -516,7 +516,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, /* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) + struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); @@ -591,7 +591,7 @@ err: * and other minor adaptations. 
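For reference, a minimal userspace sketch of the sockaddr_vm layout above; it assumes AF_VSOCK and VMADDR_CID_HOST are visible through the system headers, and the port number is only a placeholder.

#include <sys/socket.h>
#include <linux/vm_sockets.h>

static int example_vsock_connect(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid    = VMADDR_CID_HOST,
		.svm_port   = 1234,		/* placeholder */
	};
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	return connect(fd, (struct sockaddr *)&addr, sizeof(addr));
}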
*/ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, - struct msghdr *msg, size_t len) + struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); @@ -670,7 +670,7 @@ out_err: } static int setsockopt(struct socket *sock, - int lvl, int opt, char __user *ov, unsigned int ol) + int lvl, int opt, char __user *ov, unsigned int ol) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); @@ -932,7 +932,7 @@ static int caif_release(struct socket *sock) /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ static unsigned int caif_poll(struct file *file, - struct socket *sock, poll_table *wait) + struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; unsigned int mask; @@ -1022,7 +1022,7 @@ static void caif_sock_destructor(struct sock *sk) } static int caif_create(struct net *net, struct socket *sock, int protocol, - int kern) + int kern) { struct sock *sk = NULL; struct caifsock *cf_sk = NULL; diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index ef8ebaa993cf..d76278d644b8 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -75,7 +75,7 @@ static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt) } static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid) + int phyid) { if (layr->up && layr->up->ctrlcmd) layr->up->ctrlcmd(layr->up, ctrl, layr->id); @@ -121,7 +121,7 @@ static struct packet_type caif_usb_type __read_mostly = { }; static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, - void *arg) + void *arg) { struct net_device *dev = arg; struct caif_dev_common common; diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index f1dbddb95a6c..246ac3aa8de5 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -61,11 +61,11 @@ struct cfcnfg { }; static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, - enum cfctrl_srv serv, u8 phyid, - struct cflayer *adapt_layer); + enum cfctrl_srv serv, u8 phyid, + struct cflayer *adapt_layer); static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id); static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, - struct cflayer *adapt_layer); + struct cflayer *adapt_layer); static void cfctrl_resp_func(void); static void cfctrl_enum_resp(void); @@ -131,7 +131,7 @@ static void cfctrl_resp_func(void) } static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg, - u8 phyid) + u8 phyid) { struct cfcnfg_phyinfo *phy; @@ -216,8 +216,8 @@ static const int protohead[CFCTRL_SRV_MASK] = { static int caif_connect_req_to_link_param(struct cfcnfg *cnfg, - struct caif_connect_request *s, - struct cfctrl_link_param *l) + struct caif_connect_request *s, + struct cfctrl_link_param *l) { struct dev_info *dev_info; enum cfcnfg_phy_preference pref; @@ -301,8 +301,7 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg, int caif_connect_client(struct net *net, struct caif_connect_request *conn_req, struct cflayer *adap_layer, int *ifindex, - int *proto_head, - int *proto_tail) + int *proto_head, int *proto_tail) { struct cflayer *frml; struct cfcnfg_phyinfo *phy; @@ -364,7 +363,7 @@ unlock: EXPORT_SYMBOL(caif_connect_client); static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, - struct cflayer *adapt_layer) + struct cflayer *adapt_layer) { if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) adapt_layer->ctrlcmd(adapt_layer, @@ -526,7 +525,7 @@ out_err: 
EXPORT_SYMBOL(cfcnfg_add_phy_layer); int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer, - bool up) + bool up) { struct cfcnfg_phyinfo *phyinfo; diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index a376ec1ac0a7..9cd057c59c59 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -20,12 +20,12 @@ #ifdef CAIF_NO_LOOP static int handle_loop(struct cfctrl *ctrl, - int cmd, struct cfpkt *pkt){ + int cmd, struct cfpkt *pkt){ return -1; } #else static int handle_loop(struct cfctrl *ctrl, - int cmd, struct cfpkt *pkt); + int cmd, struct cfpkt *pkt); #endif static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt); static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, @@ -72,7 +72,7 @@ void cfctrl_remove(struct cflayer *layer) } static bool param_eq(const struct cfctrl_link_param *p1, - const struct cfctrl_link_param *p2) + const struct cfctrl_link_param *p2) { bool eq = p1->linktype == p2->linktype && @@ -197,8 +197,8 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) } int cfctrl_linkup_request(struct cflayer *layer, - struct cfctrl_link_param *param, - struct cflayer *user_layer) + struct cfctrl_link_param *param, + struct cflayer *user_layer) { struct cfctrl *cfctrl = container_obj(layer); u32 tmp32; @@ -301,7 +301,7 @@ int cfctrl_linkup_request(struct cflayer *layer, } int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, - struct cflayer *client) + struct cflayer *client) { int ret; struct cfpkt *pkt; @@ -555,7 +555,7 @@ error: } static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid) + int phyid) { struct cfctrl *this = container_obj(layr); switch (ctrl) { diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c index 0a7df7ef062d..204c5e226a61 100644 --- a/net/caif/cffrml.c +++ b/net/caif/cffrml.c @@ -28,7 +28,7 @@ struct cffrml { static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid); + int phyid); static u32 cffrml_rcv_error; static u32 cffrml_rcv_checsum_error; @@ -167,7 +167,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) } static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid) + int phyid) { if (layr->up && layr->up->ctrlcmd) layr->up->ctrlcmd(layr->up, ctrl, layr->id); diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c index 94b08612a4d8..154d9f8f964c 100644 --- a/net/caif/cfmuxl.c +++ b/net/caif/cfmuxl.c @@ -42,7 +42,7 @@ struct cfmuxl { static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid); + int phyid); static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); struct cflayer *cfmuxl_create(void) @@ -244,7 +244,7 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) } static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid) + int phyid) { struct cfmuxl *muxl = container_obj(layr); struct cflayer *layer; diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 863dedd91bb6..e8f9c149504d 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -266,8 +266,8 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt) } inline u16 cfpkt_iterate(struct cfpkt *pkt, - u16 (*iter_func)(u16, void *, u16), - u16 data) + u16 (*iter_func)(u16, void *, u16), 
+ u16 data) { /* * Don't care about the performance hit of linearizing, @@ -307,8 +307,8 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len) } struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, - struct cfpkt *addpkt, - u16 expectlen) + struct cfpkt *addpkt, + u16 expectlen) { struct sk_buff *dst = pkt_to_skb(dstpkt); struct sk_buff *add = pkt_to_skb(addpkt); diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index 2b563ad04597..db51830c8587 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c @@ -43,7 +43,7 @@ static void cfrfml_release(struct cflayer *layer) } struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info, - int mtu_size) + int mtu_size) { int tmp; struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC); @@ -69,7 +69,7 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info, } static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead, - struct cfpkt *pkt, int *err) + struct cfpkt *pkt, int *err) { struct cfpkt *tmppkt; *err = -EPROTO; diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c index 8e68b97f13ee..147c232b1285 100644 --- a/net/caif/cfserl.c +++ b/net/caif/cfserl.c @@ -29,7 +29,7 @@ struct cfserl { static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid); + int phyid); struct cflayer *cfserl_create(int instance, bool use_stx) { @@ -182,7 +182,7 @@ static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) } static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid) + int phyid) { layr->up->ctrlcmd(layr->up, ctrl, phyid); } diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index ba217e90765e..95f7f5ea30ef 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c @@ -25,7 +25,7 @@ #define container_obj(layr) container_of(layr, struct cfsrvl, layer) static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, - int phyid) + int phyid) { struct cfsrvl *service = container_obj(layr); @@ -158,10 +158,9 @@ static void cfsrvl_release(struct cflayer *layer) } void cfsrvl_init(struct cfsrvl *service, - u8 channel_id, - struct dev_info *dev_info, - bool supports_flowctrl - ) + u8 channel_id, + struct dev_info *dev_info, + bool supports_flowctrl) { caif_assert(offsetof(struct cfsrvl, layer) == 0); service->open = false; @@ -207,8 +206,8 @@ void caif_free_client(struct cflayer *adap_layer) EXPORT_SYMBOL(caif_free_client); void caif_client_register_refcnt(struct cflayer *adapt_layer, - void (*hold)(struct cflayer *lyr), - void (*put)(struct cflayer *lyr)) + void (*hold)(struct cflayer *lyr), + void (*put)(struct cflayer *lyr)) { struct cfsrvl *service; diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index e597733affb8..26a4e4e3a767 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -167,7 +167,7 @@ static void chnl_put(struct cflayer *lyr) } static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, - int phyid) + int phyid) { struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); pr_debug("NET flowctrl func called flow: %s\n", @@ -443,7 +443,7 @@ nla_put_failure: } static void caif_netlink_parms(struct nlattr *data[], - struct caif_connect_request *conn_req) + struct caif_connect_request *conn_req) { if (!data) { pr_warn("no params data found\n"); @@ -488,7 +488,7 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev, } static int ipcaif_changelink(struct 
net_device *dev, struct nlattr *tb[], - struct nlattr *data[]) + struct nlattr *data[]) { struct chnl_net *caifdev; ASSERT_RTNL(); diff --git a/net/can/af_can.c b/net/can/af_can.c index c48e5220bbac..8bacf281b3ee 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -525,7 +525,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, d = find_dev_rcv_lists(dev); if (!d) { - printk(KERN_ERR "BUG: receive list not found for " + pr_err("BUG: receive list not found for " "dev %s, id %03X, mask %03X\n", DNAME(dev), can_id, mask); goto out; @@ -552,7 +552,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, */ if (!r) { - printk(KERN_ERR "BUG: receive list entry not found for " + pr_err("BUG: receive list entry not found for " "dev %s, id %03X, mask %03X\n", DNAME(dev), can_id, mask); r = NULL; @@ -749,8 +749,7 @@ int can_proto_register(const struct can_proto *cp) int err = 0; if (proto < 0 || proto >= CAN_NPROTO) { - printk(KERN_ERR "can: protocol number %d out of range\n", - proto); + pr_err("can: protocol number %d out of range\n", proto); return -EINVAL; } @@ -761,8 +760,7 @@ int can_proto_register(const struct can_proto *cp) mutex_lock(&proto_tab_lock); if (proto_tab[proto]) { - printk(KERN_ERR "can: protocol %d already registered\n", - proto); + pr_err("can: protocol %d already registered\n", proto); err = -EBUSY; } else RCU_INIT_POINTER(proto_tab[proto], cp); @@ -816,11 +814,8 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, /* create new dev_rcv_lists for this device */ d = kzalloc(sizeof(*d), GFP_KERNEL); - if (!d) { - printk(KERN_ERR - "can: allocation of receive list failed\n"); + if (!d) return NOTIFY_DONE; - } BUG_ON(dev->ml_priv); dev->ml_priv = d; @@ -838,8 +833,8 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg, dev->ml_priv = NULL; } } else - printk(KERN_ERR "can: notifier: receive list not " - "found for dev %s\n", dev->name); + pr_err("can: notifier: receive list not found for dev " + "%s\n", dev->name); spin_unlock(&can_rcvlists_lock); @@ -927,7 +922,7 @@ static __exit void can_exit(void) /* remove created dev_rcv_lists from still registered CAN devices */ rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { - if (dev->type == ARPHRD_CAN && dev->ml_priv){ + if (dev->type == ARPHRD_CAN && dev->ml_priv) { struct dev_rcv_lists *d = dev->ml_priv; diff --git a/net/core/dev.c b/net/core/dev.c index dffbef70cd31..0caa38ee8935 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2208,16 +2208,8 @@ out: } EXPORT_SYMBOL(skb_checksum_help); -/** - * skb_mac_gso_segment - mac layer segmentation handler. - * @skb: buffer to segment - * @features: features for the output path (see dev->features) - */ -struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, - netdev_features_t features) +__be16 skb_network_protocol(struct sk_buff *skb) { - struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); - struct packet_offload *ptype; __be16 type = skb->protocol; while (type == htons(ETH_P_8021Q)) { @@ -2225,13 +2217,31 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, struct vlan_hdr *vh; if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) - return ERR_PTR(-EINVAL); + return 0; vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } + return type; +} + +/** + * skb_mac_gso_segment - mac layer segmentation handler. 
+ * @skb: buffer to segment + * @features: features for the output path (see dev->features) + */ +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + __be16 type = skb_network_protocol(skb); + + if (unlikely(!type)) + return ERR_PTR(-EINVAL); + __skb_pull(skb, skb->mac_len); rcu_read_lock(); @@ -2398,24 +2408,12 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) return 0; } -static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) -{ - return ((features & NETIF_F_GEN_CSUM) || - ((features & NETIF_F_V4_CSUM) && - protocol == htons(ETH_P_IP)) || - ((features & NETIF_F_V6_CSUM) && - protocol == htons(ETH_P_IPV6)) || - ((features & NETIF_F_FCOE_CRC) && - protocol == htons(ETH_P_FCOE))); -} - static netdev_features_t harmonize_features(struct sk_buff *skb, __be16 protocol, netdev_features_t features) { if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; - features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { features &= ~NETIF_F_SG; } @@ -4058,6 +4056,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, napi->gro_list = NULL; napi->skb = NULL; napi->poll = poll; + if (weight > NAPI_POLL_WEIGHT) + pr_err_once("netif_napi_add() called with weight %d on device %s\n", + weight, dev->name); napi->weight = weight; list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; @@ -4919,20 +4920,25 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } - /* Fix illegal SG+CSUM combinations. */ - if ((features & NETIF_F_SG) && - !(features & NETIF_F_ALL_CSUM)) { - netdev_dbg(dev, - "Dropping NETIF_F_SG since no checksum feature.\n"); - features &= ~NETIF_F_SG; - } - /* TSO requires that SG is present as well. */ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); features &= ~NETIF_F_ALL_TSO; } + if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) && + !(features & NETIF_F_IP_CSUM)) { + netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n"); + features &= ~NETIF_F_TSO; + features &= ~NETIF_F_TSO_ECN; + } + + if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) && + !(features & NETIF_F_IPV6_CSUM)) { + netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n"); + features &= ~NETIF_F_TSO6; + } + /* TSO ECN requires that TSO is present as well. */ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) features &= ~NETIF_F_TSO_ECN; @@ -5200,6 +5206,10 @@ int register_netdevice(struct net_device *dev) */ dev->vlan_features |= NETIF_F_HIGHDMA; + /* Make NETIF_F_SG inheritable to tunnel devices. 
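Since can_checksum_protocol() now lives in netdevice.h and skb_network_protocol() is declared there as well, callers outside the core can make the same "can this device checksum this packet?" decision; a hypothetical helper:

static bool example_dev_can_csum(struct sk_buff *skb, netdev_features_t features)
{
	__be16 proto = skb_network_protocol(skb);	/* walks VLAN tags */

	return proto && can_checksum_protocol(features, proto);
}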
+ */ + dev->hw_enc_features |= NETIF_F_SG; + ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); ret = notifier_to_errno(ret); if (ret) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 3e9b2c3e30f0..adc1351e6873 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -78,6 +78,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", + [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9d4c7201400d..f8d9e03b7a7c 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -119,6 +119,17 @@ ipv6: nhoff += 4; if (hdr->flags & GRE_SEQ) nhoff += 4; + if (proto == htons(ETH_P_TEB)) { + const struct ethhdr *eth; + struct ethhdr _eth; + + eth = skb_header_pointer(skb, nhoff, + sizeof(_eth), &_eth); + if (!eth) + return false; + proto = eth->h_proto; + nhoff += sizeof(*eth); + } goto again; } break; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a585d45cc9d9..55b5624a4b00 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2049,6 +2049,38 @@ errout: rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } +/** + * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry + */ +int ndo_dflt_fdb_add(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 flags) +{ + int err = -EINVAL; + + /* If aging addresses are supported device will need to + * implement its own handler for this. + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return err; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} +EXPORT_SYMBOL(ndo_dflt_fdb_add); + static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); @@ -2101,10 +2133,13 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) } /* Embedded bridge, macvlan, and any other device support */ - if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) { - err = dev->netdev_ops->ndo_fdb_add(ndm, tb, - dev, addr, - nlh->nlmsg_flags); + if ((ndm->ndm_flags & NTF_SELF)) { + if (dev->netdev_ops->ndo_fdb_add) + err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, + nlh->nlmsg_flags); + else + err = ndo_dflt_fdb_add(ndm, tb, dev, addr, + nlh->nlmsg_flags); if (!err) { rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH); @@ -2115,6 +2150,35 @@ out: return err; } +/** + * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry + */ +int ndo_dflt_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr) +{ + int err = -EOPNOTSUPP; + + /* If aging addresses are supported device will need to + * implement its own handler for this. 
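The exported ndo_dflt_fdb_add() above also doubles as a building block: a driver whose unicast/multicast filters are sufficient can leave .ndo_fdb_add unset (rtnl_fdb_add() now falls back to the default) or call the helper from its own handler. A hypothetical wrapper is sketched below; with a new enough iproute2 this path is exercised by something like "bridge fdb add <mac> dev <if> self permanent".

static int example_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 flags)
{
	/* driver-specific validation could go here */
	return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
}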
+ */ + if (ndm->ndm_state & NUD_PERMANENT) { + pr_info("%s: FDB only supports static addresses\n", dev->name); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + + return err; +} +EXPORT_SYMBOL(ndo_dflt_fdb_del); + static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); @@ -2172,8 +2236,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) } /* Embedded bridge, macvlan, and any other device support */ - if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { - err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); + if (ndm->ndm_flags & NTF_SELF) { + if (dev->netdev_ops->ndo_fdb_del) + err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); + else + err = ndo_dflt_fdb_del(ndm, tb, dev, addr); if (!err) { rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); @@ -2258,6 +2325,8 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) if (dev->netdev_ops->ndo_fdb_dump) idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx); + else + ndo_dflt_fdb_dump(skb, cb, dev, idx); } rcu_read_unlock(); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 33245ef54c3b..31c6737d3189 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -673,6 +673,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->mac_header = old->mac_header; new->inner_transport_header = old->inner_transport_header; new->inner_network_header = old->inner_network_header; + new->inner_mac_header = old->inner_mac_header; skb_dst_copy(new, old); new->rxhash = old->rxhash; new->ooo_okay = old->ooo_okay; @@ -867,6 +868,18 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) } EXPORT_SYMBOL(skb_clone); +static void skb_headers_offset_update(struct sk_buff *skb, int off) +{ + /* {transport,network,mac}_header and tail are relative to skb->head */ + skb->transport_header += off; + skb->network_header += off; + if (skb_mac_header_was_set(skb)) + skb->mac_header += off; + skb->inner_transport_header += off; + skb->inner_network_header += off; + skb->inner_mac_header += off; +} + static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { #ifndef NET_SKBUFF_DATA_USES_OFFSET @@ -879,13 +892,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) __copy_skb_header(new, old); #ifndef NET_SKBUFF_DATA_USES_OFFSET - /* {transport,network,mac}_header are relative to skb->head */ - new->transport_header += offset; - new->network_header += offset; - if (skb_mac_header_was_set(new)) - new->mac_header += offset; - new->inner_transport_header += offset; - new->inner_network_header += offset; + skb_headers_offset_update(new, offset); #endif skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; @@ -1077,14 +1084,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, #else skb->end = skb->head + size; #endif - /* {transport,network,mac}_header and tail are relative to skb->head */ skb->tail += off; - skb->transport_header += off; - skb->network_header += off; - if (skb_mac_header_was_set(skb)) - skb->mac_header += off; - skb->inner_transport_header += off; - skb->inner_network_header += off; + skb_headers_offset_update(skb, off); /* Only adjust this if it actually is csum_start rather than csum */ if (skb->ip_summed 
== CHECKSUM_PARTIAL) skb->csum_start += nhead; @@ -1180,12 +1181,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, if (n->ip_summed == CHECKSUM_PARTIAL) n->csum_start += off; #ifdef NET_SKBUFF_DATA_USES_OFFSET - n->transport_header += off; - n->network_header += off; - if (skb_mac_header_was_set(skb)) - n->mac_header += off; - n->inner_transport_header += off; - n->inner_network_header += off; + skb_headers_offset_update(n, off); #endif return n; @@ -2741,12 +2737,19 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) unsigned int tnl_hlen = skb_tnl_header_len(skb); unsigned int headroom; unsigned int len; + __be16 proto; + bool csum; int sg = !!(features & NETIF_F_SG); int nfrags = skb_shinfo(skb)->nr_frags; int err = -ENOMEM; int i = 0; int pos; + proto = skb_network_protocol(skb); + if (unlikely(!proto)) + return ERR_PTR(-EINVAL); + + csum = !!can_checksum_protocol(features, proto); __skb_push(skb, doffset); headroom = skb_headroom(skb); pos = skb_headlen(skb); @@ -2884,6 +2887,12 @@ skip_fraglist: nskb->data_len = len - hsize; nskb->len += nskb->data_len; nskb->truesize += nskb->data_len; + + if (!csum) { + nskb->csum = skb_checksum(nskb, doffset, + nskb->len - doffset, 0); + nskb->ip_summed = CHECKSUM_NONE; + } } while ((offset += len) < skb->len); return segs; diff --git a/net/dcb/dcbevent.c b/net/dcb/dcbevent.c index 1d9eb7c60a68..4f72fc40bf02 100644 --- a/net/dcb/dcbevent.c +++ b/net/dcb/dcbevent.c @@ -20,6 +20,7 @@ #include <linux/rtnetlink.h> #include <linux/notifier.h> #include <linux/export.h> +#include <net/dcbevent.h> static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 68f6a94f7661..9e5882caf8a7 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1283,9 +1283,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int ihl; int id; unsigned int offset = 0; - - if (!(features & NETIF_F_V4_CSUM)) - features &= ~NETIF_F_SG; + bool tunnel; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_TCPV4 | @@ -1293,6 +1291,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL | 0))) goto out; @@ -1307,6 +1306,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, ihl))) goto out; + tunnel = !!skb->encapsulation; + __skb_pull(skb, ihl); skb_reset_transport_header(skb); iph = ip_hdr(skb); @@ -1326,7 +1327,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, skb = segs; do { iph = ip_hdr(skb); - if (proto == IPPROTO_UDP) { + if (!tunnel && proto == IPPROTO_UDP) { iph->id = htons(id); iph->frag_off = htons(offset >> 3); if (skb->next != NULL) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f678507bc829..af57bbae05b9 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1791,6 +1791,74 @@ errout: return err; } +static int inet_netconf_dump_devconf(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + int h, s_h; + int idx, s_idx; + struct net_device *dev; + struct in_device *in_dev; + struct hlist_head *head; + + s_h = cb->args[0]; + s_idx = idx = cb->args[1]; + + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + in_dev = __in_dev_get_rcu(dev); + if (!in_dev) + goto cont; + + if (inet_netconf_fill_devconf(skb, dev->ifindex, + 
&in_dev->cnf, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, + NLM_F_MULTI, + -1) <= 0) { + rcu_read_unlock(); + goto done; + } +cont: + idx++; + } + rcu_read_unlock(); + } + if (h == NETDEV_HASHENTRIES) { + if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, + net->ipv4.devconf_all, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } + if (h == NETDEV_HASHENTRIES + 1) { + if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, + net->ipv4.devconf_dflt, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } +done: + cb->args[0] = h; + cb->args[1] = idx; + + return skb->len; +} + #ifdef CONFIG_SYSCTL static void devinet_copy_dflt_conf(struct net *net, int i) @@ -2195,6 +2263,6 @@ void __init devinet_init(void) rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, - NULL, NULL); + inet_netconf_dump_devconf, NULL); } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d0ef0e674ec5..a13a0972a57f 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -762,7 +762,6 @@ error: static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { - struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats); struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *old_iph; const struct iphdr *tiph; @@ -778,7 +777,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev int mtu; u8 ttl; int err; - int pkt_len; skb = handle_offloads(tunnel, skb); if (IS_ERR(skb)) { @@ -1022,19 +1020,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev } } - nf_reset(skb); - - pkt_len = skb->len - skb_transport_offset(skb); - err = ip_local_out(skb); - if (likely(net_xmit_eval(err) == 0)) { - u64_stats_update_begin(&tstats->syncp); - tstats->tx_bytes += pkt_len; - tstats->tx_packets++; - u64_stats_update_end(&tstats->syncp); - } else { - dev->stats.tx_errors++; - dev->stats.tx_aborted_errors++; - } + iptunnel_xmit(skb, dev); return NETDEV_TX_OK; #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 8f024d41eefa..34e006fe2d87 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -481,11 +481,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->protocol != htons(ETH_P_IP)) goto tx_error; - - if (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_help(skb)) - goto tx_error; - old_iph = ip_hdr(skb); if (tos & 1) @@ -570,6 +565,13 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) old_iph = ip_hdr(skb); } + if (!skb->encapsulation) { + skb_reset_inner_headers(skb); + skb->encapsulation = 1; + } + if (skb->ip_summed != CHECKSUM_PARTIAL) + skb->ip_summed = CHECKSUM_NONE; + skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); @@ -591,11 +593,13 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) iph->tos = INET_ECN_encapsulate(tos, old_iph->tos); iph->daddr = fl4.daddr; iph->saddr = fl4.saddr; + tunnel_ip_select_ident(skb, old_iph, &rt->dst); if ((iph->ttl = tiph->ttl) == 0) iph->ttl = old_iph->ttl; iptunnel_xmit(skb, dev); + return NETDEV_TX_OK; tx_error_icmp: diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 
47e854fcae24..8d14573ade77 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3044,6 +3044,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, SKB_GSO_TCP_ECN | SKB_GSO_TCPV6 | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL | 0) || !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) goto out; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4a8ec457310f..8cdee120a50c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1950,6 +1950,50 @@ void tcp_v4_early_demux(struct sk_buff *skb) } } +/* Packet is added to VJ-style prequeue for processing in process + * context, if a reader task is waiting. Apparently, this exciting + * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) + * failed somewhere. Latency? Burstiness? Well, at least now we will + * see, why it failed. 8)8) --ANK + * + */ +bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (sysctl_tcp_low_latency || !tp->ucopy.task) + return false; + + if (skb->len <= tcp_hdrlen(skb) && + skb_queue_len(&tp->ucopy.prequeue) == 0) + return false; + + __skb_queue_tail(&tp->ucopy.prequeue, skb); + tp->ucopy.memory += skb->truesize; + if (tp->ucopy.memory > sk->sk_rcvbuf) { + struct sk_buff *skb1; + + BUG_ON(sock_owned_by_user(sk)); + + while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { + sk_backlog_rcv(sk, skb1); + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPPREQUEUEDROPPED); + } + + tp->ucopy.memory = 0; + } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { + wake_up_interruptible_sync_poll(sk_sleep(sk), + POLLIN | POLLRDNORM | POLLRDBAND); + if (!inet_csk_ack_scheduled(sk)) + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, + (3 * tcp_rto_min(sk)) / 4, + TCP_RTO_MAX); + } + return true; +} +EXPORT_SYMBOL(tcp_prequeue); + /* * From tcp_input.c */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 265c42cf963c..41760e043bf5 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2272,31 +2272,88 @@ void __init udp_init(void) int udp4_ufo_send_check(struct sk_buff *skb) { - const struct iphdr *iph; - struct udphdr *uh; - - if (!pskb_may_pull(skb, sizeof(*uh))) + if (!pskb_may_pull(skb, sizeof(struct udphdr))) return -EINVAL; - iph = ip_hdr(skb); - uh = udp_hdr(skb); + if (likely(!skb->encapsulation)) { + const struct iphdr *iph; + struct udphdr *uh; - uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, - IPPROTO_UDP, 0); - skb->csum_start = skb_transport_header(skb) - skb->head; - skb->csum_offset = offsetof(struct udphdr, check); - skb->ip_summed = CHECKSUM_PARTIAL; + iph = ip_hdr(skb); + uh = udp_hdr(skb); + + uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, + IPPROTO_UDP, 0); + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + skb->ip_summed = CHECKSUM_PARTIAL; + } return 0; } +static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + int mac_len = skb->mac_len; + int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + int outer_hlen; + netdev_features_t enc_features; + + if (unlikely(!pskb_may_pull(skb, tnl_hlen))) + goto out; + + skb->encapsulation = 0; + __skb_pull(skb, tnl_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, skb_inner_network_offset(skb)); + skb->mac_len = skb_inner_network_offset(skb); + + /* segment inner packet. 
*/ + enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + segs = skb_mac_gso_segment(skb, enc_features); + if (!segs || IS_ERR(segs)) + goto out; + + outer_hlen = skb_tnl_header_len(skb); + skb = segs; + do { + struct udphdr *uh; + int udp_offset = outer_hlen - tnl_hlen; + + skb->mac_len = mac_len; + + skb_push(skb, outer_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, mac_len); + skb_set_transport_header(skb, udp_offset); + uh = udp_hdr(skb); + uh->len = htons(skb->len - udp_offset); + + /* csum segment if tunnel sets skb with csum. */ + if (unlikely(uh->check)) { + struct iphdr *iph = ip_hdr(skb); + + uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - udp_offset, + IPPROTO_UDP, 0); + uh->check = csum_fold(skb_checksum(skb, udp_offset, + skb->len - udp_offset, 0)); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + } + skb->ip_summed = CHECKSUM_NONE; + } while ((skb = skb->next)); +out: + return segs; +} + struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; - int offset; - __wsum csum; - mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; @@ -2306,6 +2363,7 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | + SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE) || !(type & (SKB_GSO_UDP)))) goto out; @@ -2316,20 +2374,27 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, goto out; } - /* Do software UFO. Complete and fill in the UDP checksum as HW cannot - * do checksum of UDP packets sent as multiple IP fragments. - */ - offset = skb_checksum_start_offset(skb); - csum = skb_checksum(skb, offset, skb->len - offset, 0); - offset += skb->csum_offset; - *(__sum16 *)(skb->data + offset) = csum_fold(csum); - skb->ip_summed = CHECKSUM_NONE; - /* Fragment the skb. IP headers of the fragments are updated in * inet_gso_segment() */ - segs = skb_segment(skb, features); + if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) + segs = skb_udp_tunnel_segment(skb, features); + else { + int offset; + __wsum csum; + + /* Do software UFO. Complete and fill in the UDP checksum as + * HW cannot do checksum of UDP packets sent as multiple + * IP fragments. 
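How a tunnel is expected to set up the outer UDP header for the segmentation path above: a zero uh->check is left alone, any non-zero value is recomputed per segment. A sketch modelled on what vxlan does; example_build_outer_udp() is not part of the patch.

static void example_build_outer_udp(struct sk_buff *skb,
				    __be16 src_port, __be16 dst_port)
{
	struct udphdr *uh;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->source = src_port;
	uh->dest   = dst_port;
	uh->len    = htons(skb->len);
	uh->check  = 0;		/* 0 = no outer UDP checksum on segments */
}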
+ */ + offset = skb_checksum_start_offset(skb); + csum = skb_checksum(skb, offset, skb->len - offset, 0); + offset += skb->csum_offset; + *(__sum16 *)(skb->data + offset) = csum_fold(csum); + skb->ip_summed = CHECKSUM_NONE; + + segs = skb_segment(skb, features); + } out: return segs; } - diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f2c7e615f902..fa36a677490f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -605,6 +605,74 @@ errout: return err; } +static int inet6_netconf_dump_devconf(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + int h, s_h; + int idx, s_idx; + struct net_device *dev; + struct inet6_dev *idev; + struct hlist_head *head; + + s_h = cb->args[0]; + s_idx = idx = cb->args[1]; + + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + idev = __in6_dev_get(dev); + if (!idev) + goto cont; + + if (inet6_netconf_fill_devconf(skb, dev->ifindex, + &idev->cnf, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, + NLM_F_MULTI, + -1) <= 0) { + rcu_read_unlock(); + goto done; + } +cont: + idx++; + } + rcu_read_unlock(); + } + if (h == NETDEV_HASHENTRIES) { + if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, + net->ipv6.devconf_all, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } + if (h == NETDEV_HASHENTRIES + 1) { + if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, + net->ipv6.devconf_dflt, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, NLM_F_MULTI, + -1) <= 0) + goto done; + else + h++; + } +done: + cb->args[0] = h; + cb->args[1] = idx; + + return skb->len; +} + #ifdef CONFIG_SYSCTL static void dev_forward_change(struct inet6_dev *idev) { @@ -4940,7 +5008,7 @@ int __init addrconf_init(void) __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, inet6_dump_ifacaddr, NULL); __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf, - NULL, NULL); + inet6_netconf_dump_devconf, NULL); ipv6_addr_label_rtnl_register(); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index aad64352cb60..6f226c850c91 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -436,10 +436,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, if (!tb[IFAL_ADDRESS]) return -EINVAL; - pfx = nla_data(tb[IFAL_ADDRESS]); - if (!pfx) - return -EINVAL; if (!tb[IFAL_LABEL]) return -EINVAL; @@ -561,10 +558,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, if (!tb[IFAL_ADDRESS]) return -EINVAL; - addr = nla_data(tb[IFAL_ADDRESS]); - if (!addr) - return -EINVAL; rcu_read_lock(); p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 6b793bfc0e10..f56277f15903 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -323,7 +323,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct net_device *dev = NULL; rcu_read_lock(); - if (addr_type & IPV6_ADDR_LINKLOCAL) { + if (__ipv6_addr_needs_scope_id(addr_type)) { if (addr_len >= sizeof(struct sockaddr_in6) && addr->sin6_scope_id) { /* Override any existing binding, if another one @@ -471,8 +471,8 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, sin->sin6_port = inet->inet_sport; } - if (ipv6_addr_type(&sin->sin6_addr) & 
							IPV6_ADDR_LINKLOCAL)
-		sin->sin6_scope_id = sk->sk_bound_dev_if;
+	sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr,
+						 sk->sk_bound_dev_if);
 	*uaddr_len = sizeof(*sin);
 	return 0;
 }
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index f5a54782a340..4b56cbbc7890 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -124,7 +124,7 @@ ipv4_connected:
 		goto out;
 	}
 
-	if (addr_type&IPV6_ADDR_LINKLOCAL) {
+	if (__ipv6_addr_needs_scope_id(addr_type)) {
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    usin->sin6_scope_id) {
 			if (sk->sk_bound_dev_if &&
@@ -355,18 +355,19 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
 		sin->sin6_port = serr->port;
-		sin->sin6_scope_id = 0;
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset),
								  struct ipv6hdr, daddr);
 			sin->sin6_addr = ip6h->daddr;
 			if (np->sndflow)
 				sin->sin6_flowinfo = ip6_flowinfo(ip6h);
-			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-				sin->sin6_scope_id = IP6CB(skb)->iif;
+			sin->sin6_scope_id =
+				ipv6_iface_scope_id(&sin->sin6_addr,
+						    IP6CB(skb)->iif);
 		} else {
 			ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
 					       &sin->sin6_addr);
+			sin->sin6_scope_id = 0;
 		}
 	}
@@ -376,18 +377,19 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
-		sin->sin6_scope_id = 0;
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (np->rxopt.all)
 				ip6_datagram_recv_ctl(sk, msg, skb);
-			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-				sin->sin6_scope_id = IP6CB(skb)->iif;
+			sin->sin6_scope_id =
+				ipv6_iface_scope_id(&sin->sin6_addr,
+						    IP6CB(skb)->iif);
 		} else {
 			struct inet_sock *inet = inet_sk(sk);
 
 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 					       &sin->sin6_addr);
+			sin->sin6_scope_id = 0;
 			if (inet->cmsg_flags)
 				ip_cmsg_recv(msg, skb);
 		}
 	}
@@ -592,7 +594,9 @@ int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
 		sin6.sin6_addr = ipv6_hdr(skb)->daddr;
 		sin6.sin6_port = ports[1];
 		sin6.sin6_flowinfo = 0;
-		sin6.sin6_scope_id = 0;
+		sin6.sin6_scope_id =
+			ipv6_iface_scope_id(&ipv6_hdr(skb)->daddr,
+					    opt->iif);
 
 		put_cmsg(msg, SOL_IPV6, IPV6_ORIGDSTADDR, sizeof(sin6), &sin6);
 	}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fff5bdd8b680..71b900c3f4ff 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -434,7 +434,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	 *	Source addr check
 	 */
 
-	if (addr_type & IPV6_ADDR_LINKLOCAL)
+	if (__ipv6_addr_needs_scope_id(addr_type))
 		iif = skb->dev->ifindex;
 
 	/*
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 9bfab19ff3c0..e4311cbc8b4e 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -54,6 +54,10 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 				if (ipv6_rcv_saddr_equal(sk, sk2))
 					break;
 			}
+			if (!relax && reuse && sk2->sk_reuse &&
+			    sk2->sk_state != TCP_LISTEN &&
+			    ipv6_rcv_saddr_equal(sk, sk2))
+				break;
 		}
 	}
@@ -169,10 +173,8 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
 	sin6->sin6_port = inet_sk(sk)->inet_dport;
 	/* We do not store received flowlabel for TCP */
 	sin6->sin6_flowinfo = 0;
-	sin6->sin6_scope_id = 0;
-	if (sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-		sin6->sin6_scope_id = sk->sk_bound_dev_if;
+	sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+						  sk->sk_bound_dev_if);
 }
 
 EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b973ed3d06cf..46e88433ec7d 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -144,7 +144,9 @@ static void ip6_fl_gc(unsigned long dummy)
 	spin_lock(&ip6_fl_lock);
 
 	for (i=0; i<=FL_HASH_MASK; i++) {
-		struct ip6_flowlabel *fl, **flp;
+		struct ip6_flowlabel *fl;
+		struct ip6_flowlabel __rcu **flp;
+
 		flp = &fl_ht[i];
 		while ((fl = rcu_dereference_protected(*flp,
					lockdep_is_held(&ip6_fl_lock))) != NULL) {
@@ -179,7 +181,9 @@ static void __net_exit ip6_fl_purge(struct net *net)
 	spin_lock(&ip6_fl_lock);
 
 	for (i = 0; i <= FL_HASH_MASK; i++) {
-		struct ip6_flowlabel *fl, **flp;
+		struct ip6_flowlabel *fl;
+		struct ip6_flowlabel __rcu **flp;
+
 		flp = &fl_ht[i];
 		while ((fl = rcu_dereference_protected(*flp,
					lockdep_is_held(&ip6_fl_lock))) != NULL) {
@@ -506,7 +510,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_flowlabel_req freq;
 	struct ipv6_fl_socklist *sfl1=NULL;
-	struct ipv6_fl_socklist *sfl, **sflp;
+	struct ipv6_fl_socklist *sfl;
+	struct ipv6_fl_socklist __rcu **sflp;
 	struct ip6_flowlabel *fl, *fl1 = NULL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e4efffe2522e..6a6ba73ff265 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -667,7 +667,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	struct net_device_stats *stats = &tunnel->dev->stats;
 	int err = -1;
 	u8 proto;
-	int pkt_len;
 	struct sk_buff *new_skb;
 
 	if (dev->type == ARPHRD_ETHER)
@@ -801,23 +800,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		}
 	}
 
-	nf_reset(skb);
-	pkt_len = skb->len;
-	err = ip6_local_out(skb);
-
-	if (net_xmit_eval(err) == 0) {
-		struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
-
-		tstats->tx_bytes += pkt_len;
-		tstats->tx_packets++;
-	} else {
-		stats->tx_errors++;
-		stats->tx_aborted_errors++;
-	}
-
+	ip6tunnel_xmit(skb, dev);
 	if (ndst)
 		ip6_tnl_dst_store(tunnel, ndst);
-
 	return 0;
 tx_err_link_failure:
 	stats->tx_carrier_errors++;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 8234c1dcdf72..71b766ee821d 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -92,14 +92,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 	u8 *prevhdr;
 	int offset = 0;
 
-	if (!(features & NETIF_F_V6_CSUM))
-		features &= ~NETIF_F_SG;
-
 	if (unlikely(skb_shinfo(skb)->gso_type &
 		     ~(SKB_GSO_UDP |
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
 		       SKB_GSO_GRE |
+		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_TCPV6 |
 		       0)))
 		goto out;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index fff83cbc197f..bef3fedfdc56 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -955,7 +955,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	unsigned int max_headroom = sizeof(struct ipv6hdr);
 	u8 proto;
 	int err = -1;
-	int pkt_len;
 
 	if (!fl6->flowi6_mark)
 		dst = ip6_tnl_dst_check(t);
@@ -1035,19 +1034,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	ipv6h->nexthdr = proto;
 	ipv6h->saddr = fl6->saddr;
 	ipv6h->daddr = fl6->daddr;
-	nf_reset(skb);
-	pkt_len = skb->len;
-	err = ip6_local_out(skb);
-
-	if (net_xmit_eval(err) == 0) {
-		struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
-
-		tstats->tx_bytes += pkt_len;
-		tstats->tx_packets++;
-	} else {
-		stats->tx_errors++;
-		stats->tx_aborted_errors++;
-	}
+	ip6tunnel_xmit(skb, dev);
 	if (ndst)
 		ip6_tnl_dst_store(t, ndst);
 	return 0;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 76ef4353d518..2712ab22a174 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -610,8 +610,6 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
 		}
 	}
 #endif
-	if (!dev->addr_len)
-		send_sllao = 0;
 	if (send_sllao)
 		optlen += ndisc_opt_addr_space(dev);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 330b5e7b7df6..eedff8ccded5 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -263,7 +263,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (addr_type != IPV6_ADDR_ANY) {
 		struct net_device *dev = NULL;
 
-		if (addr_type & IPV6_ADDR_LINKLOCAL) {
+		if (__ipv6_addr_needs_scope_id(addr_type)) {
 			if (addr_len >= sizeof(struct sockaddr_in6) &&
 			    addr->sin6_scope_id) {
 				/* Override any existing binding, if another
@@ -498,9 +498,8 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		sin6->sin6_port = 0;
 		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 		sin6->sin6_flowinfo = 0;
-		sin6->sin6_scope_id = 0;
-		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-			sin6->sin6_scope_id = IP6CB(skb)->iif;
+		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+							  IP6CB(skb)->iif);
 	}
 
 	sock_recv_ts_and_drops(msg, sk, skb);
@@ -802,7 +801,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    sin6->sin6_scope_id &&
-		    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
+		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
 			fl6.flowi6_oif = sin6->sin6_scope_id;
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 02f96dcbcf02..898e671a526b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -899,6 +899,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl = iph6->hop_limit;
 
+	skb->ip_summed = CHECKSUM_NONE;
+	ip_select_ident(iph, skb_dst(skb), NULL);
 	iptunnel_xmit(skb, dev);
 	return NETDEV_TX_OK;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 599e1ba6d1ce..3ed57eced376 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -450,15 +450,16 @@ try_again:
 		sin6->sin6_family = AF_INET6;
 		sin6->sin6_port = udp_hdr(skb)->source;
 		sin6->sin6_flowinfo = 0;
-		sin6->sin6_scope_id = 0;
 
-		if (is_udp4)
+		if (is_udp4) {
 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 					       &sin6->sin6_addr);
-		else {
+			sin6->sin6_scope_id = 0;
+		} else {
 			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
-			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-				sin6->sin6_scope_id = IP6CB(skb)->iif;
+			sin6->sin6_scope_id =
+				ipv6_iface_scope_id(&sin6->sin6_addr,
+						    IP6CB(skb)->iif);
 		}
 	}
@@ -1118,7 +1119,7 @@ do_udp_sendmsg:
 
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    sin6->sin6_scope_id &&
-		    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
+		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
 			fl6.flowi6_oif = sin6->sin6_scope_id;
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index cf05cf073c51..3bb3a891a424 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -21,6 +21,10 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
 	const struct ipv6hdr *ipv6h;
 	struct udphdr *uh;
 
+	/* UDP Tunnel offload on ipv6 is not yet supported. */
+	if (skb->encapsulation)
+		return -EINVAL;
+
 	if (!pskb_may_pull(skb, sizeof(*uh)))
 		return -EINVAL;
@@ -56,7 +60,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		/* Packet is from an untrusted source, reset gso_segs. */
 		int type = skb_shinfo(skb)->gso_type;
 
-		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
+		if (unlikely(type & ~(SKB_GSO_UDP |
+				      SKB_GSO_DODGY |
+				      SKB_GSO_UDP_TUNNEL |
 				      SKB_GSO_GRE) ||
 			     !(type & (SKB_GSO_UDP))))
 			goto out;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 571f1d211f4d..79b1876b6cd2 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -981,6 +981,7 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
 	[TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
 	[TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 	[TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
+	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
 };
 
 static void htb_work_func(struct work_struct *work)
@@ -994,7 +995,7 @@ static void htb_work_func(struct work_struct *work)
 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct htb_sched *q = qdisc_priv(sch);
-	struct nlattr *tb[TCA_HTB_INIT + 1];
+	struct nlattr *tb[TCA_HTB_MAX + 1];
 	struct tc_htb_glob *gopt;
 	int err;
 	int i;
@@ -1002,20 +1003,16 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!opt)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
+	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_HTB_INIT] == NULL) {
-		pr_err("HTB: hey probably you have bad tc tool ?\n");
+	if (!tb[TCA_HTB_INIT])
 		return -EINVAL;
-	}
+
 	gopt = nla_data(tb[TCA_HTB_INIT]);
-	if (gopt->version != HTB_VER >> 16) {
-		pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
-		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
+	if (gopt->version != HTB_VER >> 16)
 		return -EINVAL;
-	}
 
 	err = qdisc_class_hash_init(&q->clhash);
 	if (err < 0)
@@ -1027,10 +1024,13 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 	INIT_WORK(&q->work, htb_work_func);
 	skb_queue_head_init(&q->direct_queue);
 
-	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
-	if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
-		q->direct_qlen = 2;
-
+	if (tb[TCA_HTB_DIRECT_QLEN])
+		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
+	else {
+		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
+		if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
+			q->direct_qlen = 2;
+	}
 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
 		q->rate2quantum = 1;
 	q->defcls = gopt->defcls;
@@ -1056,7 +1056,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
+	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
+	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
 		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
@@ -1311,7 +1312,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 	struct nlattr *opt = tca[TCA_OPTIONS];
-	struct nlattr *tb[__TCA_HTB_MAX];
+	struct nlattr *tb[TCA_HTB_MAX + 1];
 	struct tc_htb_opt *hopt;
 
 	/* extract all subattrs from opt attr */