Diffstat (limited to 'drivers/net')
49 files changed, 948 insertions, 384 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6fae5f3ec7f6..d95fbc34b229 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); - netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); + call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) - netdev_bonding_change(bond->dev, - NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, + bond->dev); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); @@ -1560,8 +1560,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->name, bond_dev->type, slave_dev->type); - res = netdev_bonding_change(bond_dev, - NETDEV_PRE_TYPE_CHANGE); + res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, + bond_dev); res = notifier_to_errno(res); if (res) { pr_err("%s: refused to change device type\n", @@ -1581,8 +1581,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; } - netdev_bonding_change(bond_dev, - NETDEV_POST_TYPE_CHANGE); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, + bond_dev); } } else if (bond_dev->type != slave_dev->type) { pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", @@ -1943,7 +1943,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) } block_netpoll_tx(); - netdev_bonding_change(bond_dev, NETDEV_RELEASE); + call_netdevice_notifiers(NETDEV_RELEASE, bond_dev); write_lock_bh(&bond->lock); slave = bond_get_slave_by_dev(bond, slave_dev); @@ -2586,7 +2586,7 @@ re_arm: read_unlock(&bond->lock); return; } - netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } } @@ -3205,7 +3205,7 @@ re_arm: read_unlock(&bond->lock); return; } - netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } } diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index f0921d16f0a9..6ff7ad006c30 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -74,7 +74,6 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) const struct platform_device_id *id; struct resource *mem; int irq; -#ifdef CONFIG_HAVE_CLK struct clk *clk; /* get the appropriate clk */ @@ -84,7 +83,6 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) ret = -ENODEV; goto exit; } -#endif /* get the platform data */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -145,10 +143,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) dev->irq = irq; priv->base = addr; -#ifdef CONFIG_HAVE_CLK priv->can.clock.freq = clk_get_rate(clk); priv->priv = clk; -#endif platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -172,10 +168,8 @@ exit_iounmap: exit_release_mem: release_mem_region(mem->start, resource_size(mem)); exit_free_clk: -#ifdef CONFIG_HAVE_CLK clk_put(clk); exit: -#endif dev_err(&pdev->dev, "probe failed\n"); return ret; @@ -196,9 +190,7 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); -#ifdef 
CONFIG_HAVE_CLK clk_put(priv->priv); -#endif return 0; } diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index f0c8bd54ce29..021d69c5d9bc 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1712,7 +1712,7 @@ e100_set_network_leds(int active) static void e100_netpoll(struct net_device* netdev) { - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); } #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 734fd87cd990..62f754bd0dfe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, break; default: + kfree(new_cmd); BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 8596acaa402b..d49933ed551f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, #endif while (n--) { - pg = alloc_page(gfp); + pg = __skb_alloc_page(gfp, NULL); if (unlikely(!pg)) { q->alloc_failed++; break; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f2d1ecdcaf98..8877fbfefb63 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -653,7 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, alloc_small_pages: while (n--) { - page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD); + page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL); if (unlikely(!page)) { fl->alloc_failed++; break; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c60de89b6669..90a903d83d87 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (adapter->num_rx_qs != MAX_RX_QS) dev_info(&adapter->pdev->dev, - "Created only %d receive queues", adapter->num_rx_qs); + "Created only %d receive queues\n", adapter->num_rx_qs); return 0; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1050411e7ca3..b7c2d5050572 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6235,7 +6235,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; if (!page) { - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = __skb_alloc_page(GFP_ATOMIC, bi->skb); bi->page = page; if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c709eae58c63..4326f74f7137 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1141,8 +1141,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, /* alloc new page for storage */ if (likely(!page)) { - page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, - ixgbe_rx_pg_order(rx_ring)); + page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, + bi->skb, ixgbe_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_rx_page_failed++; return 
false; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3f9841d619ad..60ef64587412 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -352,7 +352,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, adapter->alloc_rx_buff_failed++; goto no_buffers; } - bi->skb = skb; } if (!bi->dma) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f32e70300770..5aba5ecdf1e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* If source MAC is equal to our own MAC and not performing * the selftest or flb disabled - drop the packet */ if (s_mac == priv->mac && - (!(dev->features & NETIF_F_LOOPBACK) || - !priv->validate_loopback)) + !((dev->features & NETIF_F_LOOPBACK) || + priv->validate_loopback)) goto next; /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 019d856b1334..10bba09c44ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->cons = 0xffffffff; ring->last_nr_txbb = 1; ring->poll_cnt = 0; - ring->blocked = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); @@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ring->cons += txbbs_skipped; netdev_tx_completed_queue(ring->tx_queue, packets, bytes); - /* Wakeup Tx queue if this ring stopped it */ - if (unlikely(ring->blocked)) { - if ((u32) (ring->prod - ring->cons) <= - ring->size - HEADROOM - MAX_DESC_TXBBS) { - ring->blocked = 0; - netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; - } + /* + * Wakeup Tx queue if this stopped, and at least 1 packet + * was completed + */ + if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { + netif_tx_wake_queue(ring->tx_queue); + priv->port_stats.wake_queue++; } } @@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - ring->blocked = 1; priv->port_stats.queue_stopped++; return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 48d0e90194cb..827b72dfce99 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev, "on this HCA, aborting.\n"); return -EINVAL; } - if (port_type[i] == MLX4_PORT_TYPE_ETH && - port_type[i + 1] == MLX4_PORT_TYPE_IB) - return -EINVAL; } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5f1ab105debc..9d27e42264e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -248,7 +248,6 @@ struct mlx4_en_tx_ring { u32 doorbell_qpn; void *buf; u16 poll_cnt; - int blocked; struct mlx4_en_tx_info *tx_info; u8 *bounce_buf; u32 last_nr_txbb; diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c index 
802498293528..34ee09bae36e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev, } /* - * Adjust port configuration: - * If port 1 sensed nothing and port 2 is IB, set both as IB - * If port 2 sensed nothing and port 1 is Eth, set both as Eth - */ - if (stype[0] == MLX4_PORT_TYPE_ETH) { - for (i = 1; i < dev->caps.num_ports; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; - } - if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { - for (i = 0; i < dev->caps.num_ports - 1; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; - } - - /* * If sensed nothing, remain in current configuration. */ for (i = 0; i < dev->caps.num_ports; i++) diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index cd827ff4a021..c42bbb16cdae 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -6,19 +6,21 @@ * Copyright (C) 2009 Cavium Networks */ -#include <linux/capability.h> +#include <linux/platform_device.h> #include <linux/dma-mapping.h> -#include <linux/init.h> -#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/capability.h> #include <linux/interrupt.h> -#include <linux/platform_device.h> #include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/if.h> +#include <linux/spinlock.h> #include <linux/if_vlan.h> +#include <linux/of_mdio.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/init.h> #include <linux/slab.h> #include <linux/phy.h> -#include <linux/spinlock.h> +#include <linux/io.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-mixx-defs.h> @@ -58,8 +60,56 @@ union mgmt_port_ring_entry { } s; }; +#define MIX_ORING1 0x0 +#define MIX_ORING2 0x8 +#define MIX_IRING1 0x10 +#define MIX_IRING2 0x18 +#define MIX_CTL 0x20 +#define MIX_IRHWM 0x28 +#define MIX_IRCNT 0x30 +#define MIX_ORHWM 0x38 +#define MIX_ORCNT 0x40 +#define MIX_ISR 0x48 +#define MIX_INTENA 0x50 +#define MIX_REMCNT 0x58 +#define MIX_BIST 0x78 + +#define AGL_GMX_PRT_CFG 0x10 +#define AGL_GMX_RX_FRM_CTL 0x18 +#define AGL_GMX_RX_FRM_MAX 0x30 +#define AGL_GMX_RX_JABBER 0x38 +#define AGL_GMX_RX_STATS_CTL 0x50 + +#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0 +#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8 +#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0 + +#define AGL_GMX_RX_ADR_CTL 0x100 +#define AGL_GMX_RX_ADR_CAM_EN 0x108 +#define AGL_GMX_RX_ADR_CAM0 0x180 +#define AGL_GMX_RX_ADR_CAM1 0x188 +#define AGL_GMX_RX_ADR_CAM2 0x190 +#define AGL_GMX_RX_ADR_CAM3 0x198 +#define AGL_GMX_RX_ADR_CAM4 0x1a0 +#define AGL_GMX_RX_ADR_CAM5 0x1a8 + +#define AGL_GMX_TX_STATS_CTL 0x268 +#define AGL_GMX_TX_CTL 0x270 +#define AGL_GMX_TX_STAT0 0x280 +#define AGL_GMX_TX_STAT1 0x288 +#define AGL_GMX_TX_STAT2 0x290 +#define AGL_GMX_TX_STAT3 0x298 +#define AGL_GMX_TX_STAT4 0x2a0 +#define AGL_GMX_TX_STAT5 0x2a8 +#define AGL_GMX_TX_STAT6 0x2b0 +#define AGL_GMX_TX_STAT7 0x2b8 +#define AGL_GMX_TX_STAT8 0x2c0 +#define AGL_GMX_TX_STAT9 0x2c8 + struct octeon_mgmt { struct net_device *netdev; + u64 mix; + u64 agl; int port; int irq; u64 *tx_ring; @@ -85,31 +135,34 @@ struct octeon_mgmt { struct napi_struct napi; struct tasklet_struct tx_clean_tasklet; struct phy_device *phydev; + struct device_node *phy_np; + resource_size_t mix_phys; + resource_size_t mix_size; + resource_size_t agl_phys; + resource_size_t agl_size; }; static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) { - int 
port = p->port; union cvmx_mixx_intena mix_intena; unsigned long flags; spin_lock_irqsave(&p->lock, flags); - mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); mix_intena.s.ithena = enable ? 1 : 0; - cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); spin_unlock_irqrestore(&p->lock, flags); } static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) { - int port = p->port; union cvmx_mixx_intena mix_intena; unsigned long flags; spin_lock_irqsave(&p->lock, flags); - mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); mix_intena.s.othena = enable ? 1 : 0; - cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); spin_unlock_irqrestore(&p->lock, flags); } @@ -146,7 +199,6 @@ static unsigned int ring_size_to_bytes(unsigned int ring_size) static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { unsigned int size; @@ -177,24 +229,23 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; p->rx_current_fill++; /* Ring the bell. */ - cvmx_write_csr(CVMX_MIXX_IRING2(port), 1); + cvmx_write_csr(p->mix + MIX_IRING2, 1); } } static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) { - int port = p->port; union cvmx_mixx_orcnt mix_orcnt; union mgmt_port_ring_entry re; struct sk_buff *skb; int cleaned = 0; unsigned long flags; - mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); while (mix_orcnt.s.orcnt) { spin_lock_irqsave(&p->tx_list.lock, flags); - mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); if (mix_orcnt.s.orcnt == 0) { spin_unlock_irqrestore(&p->tx_list.lock, flags); @@ -214,7 +265,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) mix_orcnt.s.orcnt = 1; /* Acknowledge to hardware that we have the buffer. */ - cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64); + cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); p->tx_current_fill--; spin_unlock_irqrestore(&p->tx_list.lock, flags); @@ -224,7 +275,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) dev_kfree_skb_any(skb); cleaned++; - mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); } if (cleaned && netif_queue_stopped(p->netdev)) @@ -241,13 +292,12 @@ static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) static void octeon_mgmt_update_rx_stats(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; unsigned long flags; u64 drop, bad; /* These reads also clear the count registers. */ - drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port)); - bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port)); + drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); + bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); if (drop || bad) { /* Do an atomic update. 
*/ @@ -261,15 +311,14 @@ static void octeon_mgmt_update_rx_stats(struct net_device *netdev) static void octeon_mgmt_update_tx_stats(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; unsigned long flags; union cvmx_agl_gmx_txx_stat0 s0; union cvmx_agl_gmx_txx_stat1 s1; /* These reads also clear the count registers. */ - s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port)); - s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port)); + s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); + s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { /* Do an atomic update. */ @@ -308,7 +357,6 @@ static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, static int octeon_mgmt_receive_one(struct octeon_mgmt *p) { - int port = p->port; struct net_device *netdev = p->netdev; union cvmx_mixx_ircnt mix_ircnt; union mgmt_port_ring_entry re; @@ -381,18 +429,17 @@ done: /* Tell the hardware we processed a packet. */ mix_ircnt.u64 = 0; mix_ircnt.s.ircnt = 1; - cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); + cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); return rc; } static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) { - int port = p->port; unsigned int work_done = 0; union cvmx_mixx_ircnt mix_ircnt; int rc; - mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); while (work_done < budget && mix_ircnt.s.ircnt) { rc = octeon_mgmt_receive_one(p); @@ -400,7 +447,7 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) work_done++; /* Check for more packets. */ - mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); } octeon_mgmt_rx_fill_ring(p->netdev); @@ -434,16 +481,16 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) union cvmx_agl_gmx_bist agl_gmx_bist; mix_ctl.u64 = 0; - cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); do { - mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); } while (mix_ctl.s.busy); mix_ctl.s.reset = 1; - cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); - cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + cvmx_read_csr(p->mix + MIX_CTL); cvmx_wait(64); - mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port)); + mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); if (mix_bist.u64) dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", (unsigned long long)mix_bist.u64); @@ -474,7 +521,6 @@ static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; unsigned long flags; @@ -520,29 +566,29 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) spin_lock_irqsave(&p->lock, flags); /* Disable packet I/O. 
*/ - agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prev_packet_enable = agl_gmx_prtx.s.en; agl_gmx_prtx.s.en = 0; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); adr_ctl.u64 = 0; adr_ctl.s.cam_mode = cam_mode; adr_ctl.s.mcst = multicast_mode; adr_ctl.s.bcst = 1; /* Allow broadcast */ - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]); - cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); /* Restore packet I/O. */ agl_gmx_prtx.s.en = prev_packet_enable; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); spin_unlock_irqrestore(&p->lock, flags); } @@ -564,7 +610,6 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; /* @@ -580,8 +625,8 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; - cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs); - cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); + cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, (size_without_fcs + 7) & 0xfff8); return 0; @@ -591,14 +636,13 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) { struct net_device *netdev = dev_id; struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union cvmx_mixx_isr mixx_isr; - mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port)); + mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); /* Clear any pending interrupts */ - cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64); - cvmx_read_csr(CVMX_MIXX_ISR(port)); + cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); + cvmx_read_csr(p->mix + MIX_ISR); if (mixx_isr.s.irthresh) { octeon_mgmt_disable_rx_irq(p); @@ -629,7 +673,6 @@ static int octeon_mgmt_ioctl(struct net_device *netdev, static void octeon_mgmt_adjust_link(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union cvmx_agl_gmx_prtx_cfg prtx_cfg; unsigned long flags; int link_changed = 0; @@ -640,11 +683,9 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev) link_changed = 1; if (p->last_duplex != p->phydev->duplex) { p->last_duplex = p->phydev->duplex; - prtx_cfg.u64 = - 
cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.duplex = p->phydev->duplex; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), - prtx_cfg.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); } } else { if (p->last_link) @@ -670,18 +711,16 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev) static int octeon_mgmt_init_phy(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - char phy_id[MII_BUS_ID_SIZE + 3]; - if (octeon_is_simulation()) { + if (octeon_is_simulation() || p->phy_np == NULL) { /* No PHYs in the simulator. */ netif_carrier_on(netdev); return 0; } - snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", p->port); - - p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + p->phydev = of_phy_connect(netdev, p->phy_np, + octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); if (IS_ERR(p->phydev)) { p->phydev = NULL; @@ -737,14 +776,14 @@ static int octeon_mgmt_open(struct net_device *netdev) octeon_mgmt_reset_hw(p); - mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); /* Bring it out of reset if needed. */ if (mix_ctl.s.reset) { mix_ctl.s.reset = 0; - cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); do { - mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); } while (mix_ctl.s.reset); } @@ -755,17 +794,17 @@ static int octeon_mgmt_open(struct net_device *netdev) oring1.u64 = 0; oring1.s.obase = p->tx_ring_handle >> 3; oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; - cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64); + cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); iring1.u64 = 0; iring1.s.ibase = p->rx_ring_handle >> 3; iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; - cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64); + cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); /* Disable packet I/O. */ - prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.en = 0; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); octeon_mgmt_set_mac_address(netdev, &sa); @@ -782,7 +821,7 @@ static int octeon_mgmt_open(struct net_device *netdev) mix_ctl.s.nbtarb = 0; /* Arbitration mode */ /* MII CB-request FIFO programmable high watermark */ mix_ctl.s.mrq_hwm = 1; - cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { @@ -809,16 +848,16 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Clear statistics. */ /* Clear on read. 
*/ - cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1); - cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0); - cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); - cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1); - cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0); - cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0); + cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); /* Clear any pending interrupts */ - cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port))); + cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, netdev)) { @@ -829,18 +868,18 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Interrupt every single RX packet */ mix_irhwm.u64 = 0; mix_irhwm.s.irhwm = 0; - cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64); + cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); /* Interrupt when we have 1 or more packets to clean. */ mix_orhwm.u64 = 0; mix_orhwm.s.orhwm = 1; - cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64); + cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); /* Enable receive and transmit interrupts */ mix_intena.u64 = 0; mix_intena.s.ithena = 1; mix_intena.s.othena = 1; - cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); /* Enable packet I/O. */ @@ -871,7 +910,7 @@ static int octeon_mgmt_open(struct net_device *netdev) * frame. GMX checks that the PREAMBLE is sent correctly. */ rxx_frm_ctl.s.pre_chk = 1; - cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64); + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); /* Enable the AGL block */ agl_gmx_inf_mode.u64 = 0; @@ -879,13 +918,13 @@ static int octeon_mgmt_open(struct net_device *netdev) cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); /* Configure the port duplex and enables */ - prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.tx_en = 1; prtx_cfg.s.rx_en = 1; prtx_cfg.s.en = 1; p->last_duplex = 1; prtx_cfg.s.duplex = p->last_duplex; - cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); p->last_link = 0; netif_carrier_off(netdev); @@ -949,7 +988,6 @@ static int octeon_mgmt_stop(struct net_device *netdev) static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); - int port = p->port; union mgmt_port_ring_entry re; unsigned long flags; int rv = NETDEV_TX_BUSY; @@ -993,7 +1031,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) netdev->stats.tx_bytes += skb->len; /* Ring the bell. 
*/ - cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); + cvmx_write_csr(p->mix + MIX_ORING2, 1); rv = NETDEV_TX_OK; out: @@ -1071,10 +1109,14 @@ static const struct net_device_ops octeon_mgmt_ops = { static int __devinit octeon_mgmt_probe(struct platform_device *pdev) { - struct resource *res_irq; struct net_device *netdev; struct octeon_mgmt *p; - int i; + const __be32 *data; + const u8 *mac; + struct resource *res_mix; + struct resource *res_agl; + int len; + int result; netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); if (netdev == NULL) @@ -1088,14 +1130,63 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) p->netdev = netdev; p->dev = &pdev->dev; - p->port = pdev->id; + data = of_get_property(pdev->dev.of_node, "cell-index", &len); + if (data && len == sizeof(*data)) { + p->port = be32_to_cpup(data); + } else { + dev_err(&pdev->dev, "no 'cell-index' property\n"); + result = -ENXIO; + goto err; + } + snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); - res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res_irq) + result = platform_get_irq(pdev, 0); + if (result < 0) + goto err; + + p->irq = result; + + res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mix == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res_agl == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + p->mix_phys = res_mix->start; + p->mix_size = resource_size(res_mix); + p->agl_phys = res_agl->start; + p->agl_size = resource_size(res_agl); + + + if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, + res_mix->name)) { + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_mix->name); + result = -ENXIO; + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, + res_agl->name)) { + result = -ENXIO; + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_agl->name); goto err; + } + + + p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); + p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); - p->irq = res_irq->start; spin_lock_init(&p->lock); skb_queue_head_init(&p->tx_list); @@ -1108,24 +1199,26 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev) netdev->netdev_ops = &octeon_mgmt_ops; netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; - /* The mgmt ports get the first N MACs. 
*/ - for (i = 0; i < 6; i++) - netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; - netdev->dev_addr[5] += p->port; + mac = of_get_mac_address(pdev->dev.of_node); + + if (mac) + memcpy(netdev->dev_addr, mac, 6); - if (p->port >= octeon_bootinfo->mac_addr_count) - dev_err(&pdev->dev, - "Error %s: Using MAC outside of the assigned range: %pM\n", - netdev->name, netdev->dev_addr); + p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (register_netdev(netdev)) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64); + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + result = register_netdev(netdev); + if (result) goto err; dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); return 0; + err: free_netdev(netdev); - return -ENOENT; + return result; } static int __devexit octeon_mgmt_remove(struct platform_device *pdev) @@ -1137,10 +1230,19 @@ static int __devexit octeon_mgmt_remove(struct platform_device *pdev) return 0; } +static struct of_device_id octeon_mgmt_match[] = { + { + .compatible = "cavium,octeon-5750-mix", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, octeon_mgmt_match); + static struct platform_driver octeon_mgmt_driver = { .driver = { .name = "octeon_mgmt", .owner = THIS_MODULE, + .of_match_table = octeon_mgmt_match, }, .probe = octeon_mgmt_probe, .remove = __devexit_p(octeon_mgmt_remove), diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 70554a1b2b02..65a8d49106a4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx) goto fail2; } + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; rc = efx_probe_filters(efx); @@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index be8f9158a714..70755c97251a 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -30,6 +30,7 @@ extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -/* The smallest [rt]xq_entries that the driver supports. Callers of - * efx_wake_queue() assume that they can subsequently send at least one - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. 
+ */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ extern int efx_probe_filters(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 10536f93b561..8cba2df82b18 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; - if (ring->rx_pending < EFX_MIN_RING_SIZE || - ring->tx_pending < EFX_MIN_RING_SIZE) { + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, - "TX and RX queues cannot be smaller than %ld\n", - EFX_MIN_RING_SIZE); + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); return -EINVAL; } - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 9b225a7769f7..18713436b443 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) return len; } +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround */ + if (EFX_WORKAROUND_5391(efx)) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + /* * Add a socket buffer to a TX queue * diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ab4c376cb276..f2d3665430ad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -82,9 +82,7 @@ struct stmmac_priv { struct stmmac_counters mmc; struct dma_features dma_cap; int hw_cap_support; -#ifdef CONFIG_HAVE_CLK struct clk *stmmac_clk; -#endif int clk_csr; int synopsys_id; struct timer_list eee_ctrl_timer; @@ -113,46 +111,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, void stmmac_disable_eee_mode(struct stmmac_priv *priv); bool stmmac_eee_init(struct stmmac_priv *priv); -#ifdef CONFIG_HAVE_CLK -static inline int stmmac_clk_enable(struct stmmac_priv *priv) -{ - if (!IS_ERR(priv->stmmac_clk)) - return clk_prepare_enable(priv->stmmac_clk); - - return 0; -} - -static inline void stmmac_clk_disable(struct stmmac_priv *priv) -{ - if (IS_ERR(priv->stmmac_clk)) - return; - - clk_disable_unprepare(priv->stmmac_clk); -} -static inline int stmmac_clk_get(struct stmmac_priv *priv) -{ - priv->stmmac_clk = clk_get(priv->device, NULL); - - if (IS_ERR(priv->stmmac_clk)) - return 
PTR_ERR(priv->stmmac_clk); - - return 0; -} -#else -static inline int stmmac_clk_enable(struct stmmac_priv *priv) -{ - return 0; -} -static inline void stmmac_clk_disable(struct stmmac_priv *priv) -{ -} -static inline int stmmac_clk_get(struct stmmac_priv *priv) -{ - return 0; -} -#endif /* CONFIG_HAVE_CLK */ - - #ifdef CONFIG_STMMAC_PLATFORM extern struct platform_driver stmmac_pltfr_driver; static inline int stmmac_register_platform(void) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f6b04c1a3672..fd8882f9602a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -28,6 +28,7 @@ https://bugzilla.stlinux.com/ *******************************************************************************/ +#include <linux/clk.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/ip.h> @@ -173,12 +174,8 @@ static void stmmac_verify_args(void) static void stmmac_clk_csr_set(struct stmmac_priv *priv) { -#ifdef CONFIG_HAVE_CLK u32 clk_rate; - if (IS_ERR(priv->stmmac_clk)) - return; - clk_rate = clk_get_rate(priv->stmmac_clk); /* Platform provided default clk_csr would be assumed valid @@ -200,7 +197,6 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) * we can not estimate the proper divider as it is not known * the frequency of clk_csr_i. So we do not change the default * divider. */ -#endif } #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) @@ -1070,7 +1066,7 @@ static int stmmac_open(struct net_device *dev) } else priv->tm->enable = 1; #endif - stmmac_clk_enable(priv); + clk_enable(priv->stmmac_clk); stmmac_check_ether_addr(priv); @@ -1192,7 +1188,7 @@ open_error: if (priv->phydev) phy_disconnect(priv->phydev); - stmmac_clk_disable(priv); + clk_disable(priv->stmmac_clk); return ret; } @@ -1250,7 +1246,7 @@ static int stmmac_release(struct net_device *dev) #ifdef CONFIG_STMMAC_DEBUG_FS stmmac_exit_fs(); #endif - stmmac_clk_disable(priv); + clk_disable(priv->stmmac_clk); return 0; } @@ -2078,11 +2074,14 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, ret = register_netdev(ndev); if (ret) { pr_err("%s: ERROR %i registering the device\n", __func__, ret); - goto error; + goto error_netdev_register; } - if (stmmac_clk_get(priv)) + priv->stmmac_clk = clk_get(priv->device, NULL); + if (IS_ERR(priv->stmmac_clk)) { pr_warning("%s: warning: cannot get CSR clock\n", __func__); + goto error_clk_get; + } /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be @@ -2100,15 +2099,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, if (ret < 0) { pr_debug("%s: MDIO bus (id: %d) registration failed", __func__, priv->plat->bus_id); - goto error; + goto error_mdio_register; } return priv; -error: - netif_napi_del(&priv->napi); - +error_mdio_register: + clk_put(priv->stmmac_clk); +error_clk_get: unregister_netdev(ndev); +error_netdev_register: + netif_napi_del(&priv->napi); free_netdev(ndev); return NULL; @@ -2177,7 +2178,7 @@ int stmmac_suspend(struct net_device *ndev) else { stmmac_set_mac(priv->ioaddr, false); /* Disable clock in case of PWM is off */ - stmmac_clk_disable(priv); + clk_disable(priv->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); return 0; @@ -2202,7 +2203,7 @@ int stmmac_resume(struct net_device *ndev) priv->hw->mac->pmt(priv->ioaddr, 0); else /* enable the clk prevously disabled */ - stmmac_clk_enable(priv); + 
clk_enable(priv->stmmac_clk); netif_device_attach(ndev); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 1b173a6145d6..b26cbda5efa9 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC config TI_DAVINCI_MDIO tristate "TI DaVinci MDIO Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) select PHYLIB ---help--- This driver supports TI's DaVinci MDIO module. @@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) ---help--- This driver supports TI's DaVinci CPDMA dma engine. diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1e5d85b06e71..0cbc0e59252c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -28,6 +28,9 @@ #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_device.h> #include <linux/platform_data/cpsw.h> @@ -709,6 +712,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->sliver = regs + data->sliver_reg_ofs; } +static int cpsw_probe_dt(struct cpsw_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *slave_node; + int i = 0, ret; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "slaves", &prop)) { + pr_err("Missing slaves property in the DT.\n"); + return -EINVAL; + } + data->slaves = prop; + + data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * + data->slaves, GFP_KERNEL); + if (!data->slave_data) { + pr_err("Could not allocate slave memory.\n"); + return -EINVAL; + } + + data->no_bd_ram = of_property_read_bool(node, "no_bd_ram"); + + if (of_property_read_u32(node, "cpdma_channels", &prop)) { + pr_err("Missing cpdma_channels property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->channels = prop; + + if (of_property_read_u32(node, "host_port_no", &prop)) { + pr_err("Missing host_port_no property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->host_port_num = prop; + + if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) { + pr_err("Missing cpdma_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->cpdma_reg_ofs = prop; + + if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) { + pr_err("Missing cpdma_sram_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->cpdma_sram_ofs = prop; + + if (of_property_read_u32(node, "ale_reg_ofs", &prop)) { + pr_err("Missing ale_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->ale_reg_ofs = prop; + + if (of_property_read_u32(node, "ale_entries", &prop)) { + pr_err("Missing ale_entries property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->ale_entries = prop; + + if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) { + pr_err("Missing host_port_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->host_port_reg_ofs = prop; + + if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) { + pr_err("Missing hw_stats_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->hw_stats_reg_ofs = prop; + + if 
(of_property_read_u32(node, "bd_ram_ofs", &prop)) { + pr_err("Missing bd_ram_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->bd_ram_ofs = prop; + + if (of_property_read_u32(node, "bd_ram_size", &prop)) { + pr_err("Missing bd_ram_size property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->bd_ram_size = prop; + + if (of_property_read_u32(node, "rx_descs", &prop)) { + pr_err("Missing rx_descs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->rx_descs = prop; + + if (of_property_read_u32(node, "mac_control", &prop)) { + pr_err("Missing mac_control property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->mac_control = prop; + + for_each_child_of_node(node, slave_node) { + struct cpsw_slave_data *slave_data = data->slave_data + i; + const char *phy_id = NULL; + const void *mac_addr = NULL; + + if (of_property_read_string(slave_node, "phy_id", &phy_id)) { + pr_err("Missing slave[%d] phy_id property\n", i); + ret = -EINVAL; + goto error_ret; + } + slave_data->phy_id = phy_id; + + if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) { + pr_err("Missing slave[%d] slave_reg_ofs property\n", i); + ret = -EINVAL; + goto error_ret; + } + slave_data->slave_reg_ofs = prop; + + if (of_property_read_u32(slave_node, "sliver_reg_ofs", + &prop)) { + pr_err("Missing slave[%d] sliver_reg_ofs property\n", + i); + ret = -EINVAL; + goto error_ret; + } + slave_data->sliver_reg_ofs = prop; + + mac_addr = of_get_mac_address(slave_node); + if (mac_addr) + memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + + i++; + } + + return 0; + +error_ret: + kfree(data->slave_data); + return ret; +} + static int __devinit cpsw_probe(struct platform_device *pdev) { struct cpsw_platform_data *data = pdev->dev.platform_data; @@ -720,11 +875,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev) struct resource *res; int ret = 0, i, k = 0; - if (!data) { - pr_err("platform data missing\n"); - return -ENODEV; - } - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); if (!ndev) { pr_err("error allocating net_device\n"); @@ -734,13 +884,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); spin_lock_init(&priv->lock); - priv->data = *data; priv->pdev = pdev; priv->ndev = ndev; priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); + if (cpsw_probe_dt(&priv->data, pdev)) { + pr_err("cpsw: platform data missing\n"); + ret = -ENODEV; + goto clean_ndev_ret; + } + data = &priv->data; + if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); pr_info("Detected MACID = %pM", priv->mac_addr); @@ -996,11 +1152,17 @@ static const struct dev_pm_ops cpsw_pm_ops = { .resume = cpsw_resume, }; +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw", }, + { /* sentinel */ }, +}; + static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", .owner = THIS_MODULE, .pm = &cpsw_pm_ops, + .of_match_table = of_match_ptr(cpsw_of_mtable), }, .probe = cpsw_probe, .remove = __devexit_p(cpsw_remove), diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index cd7ee204e94a..573f3be5f421 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -36,6 +36,8 @@ #include <linux/io.h> #include <linux/pm_runtime.h> #include 
<linux/davinci_emac.h> +#include <linux/of.h> +#include <linux/of_device.h> /* * This timeout definition is a worst-case ultra defensive measure against @@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return 0; } +static int davinci_mdio_probe_dt(struct mdio_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "bus_freq", &prop)) { + pr_err("Missing bus_freq property in the DT.\n"); + return -EINVAL; + } + data->bus_freq = prop; + + return 0; +} + + static int __devinit davinci_mdio_probe(struct platform_device *pdev) { struct mdio_platform_data *pdata = pdev->dev.platform_data; @@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - data->pdata = pdata ? (*pdata) : default_pdata; - data->bus = mdiobus_alloc(); if (!data->bus) { dev_err(dev, "failed to alloc mii bus\n"); @@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev) goto bail_out; } + if (dev->of_node) { + if (davinci_mdio_probe_dt(&data->pdata, pdev)) + data->pdata = default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + } else { + data->pdata = pdata ? (*pdata) : default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + } + data->bus->name = dev_name(dev); data->bus->read = davinci_mdio_read, data->bus->write = davinci_mdio_write, data->bus->reset = davinci_mdio_reset, data->bus->parent = dev; data->bus->priv = data; - snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); @@ -454,11 +481,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = { .resume = davinci_mdio_resume, }; +static const struct of_device_id davinci_mdio_of_mtable[] = { + { .compatible = "ti,davinci_mdio", }, + { /* sentinel */ }, +}; + static struct platform_driver davinci_mdio_driver = { .driver = { .name = "davinci_mdio", .owner = THIS_MODULE, .pm = &davinci_mdio_pm_ops, + .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, .probe = davinci_mdio_probe, .remove = __devexit_p(davinci_mdio_remove), diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 6cee2917eb02..4a1a5f58fa73 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device) unsigned long flags; net_device = hv_get_drvdata(device); - spin_lock_irqsave(&device->channel->inbound_lock, flags); - net_device->destroy = true; - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); - - /* Wait for all send completions */ - wait_event(net_device->wait_drain, - atomic_read(&net_device->num_outstanding_sends) == 0); netvsc_disconnect_vsp(net_device); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 8c5a1c43c81d..e91111a656f7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -400,7 +400,7 @@ static void netvsc_send_garp(struct work_struct *w) ndev_ctx = container_of(w, struct net_device_context, dwork.work); net_device = hv_get_drvdata(ndev_ctx->device_ctx); net = net_device->ndev; - netif_notify_peers(net); + netdev_notify_peers(net); } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index e5d6146937fa..06f8601f32fc 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ 
b/drivers/net/hyperv/rndis_filter.c @@ -46,8 +46,14 @@ struct rndis_request { /* Simplify allocation by having a netvsc packet inline */ struct hv_netvsc_packet pkt; struct hv_page_buffer buf; - /* FIXME: We assumed a fixed size request here. */ + struct rndis_message request_msg; + /* + * The buffer for the extended info after the RNDIS message. It's + * referenced based on the data offset in the RNDIS message. Its size + * is enough for current needs, and should be sufficient for the near + * future. + */ u8 ext[100]; }; @@ -718,6 +724,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; + struct netvsc_device *nvdev = dev->net_dev; + struct hv_device *hdev = nvdev->dev; + ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -735,6 +744,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev) dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: + spin_lock_irqsave(&hdev->channel->inbound_lock, flags); + nvdev->destroy = true; + spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); + + /* Wait for all send completions */ + wait_event(nvdev->wait_drain, + atomic_read(&nvdev->num_outstanding_sends) == 0); + if (request) put_rndis_request(dev, request); return; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index e2a06fd996d5..4a075babe193 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net) if (err) goto out_free_netdev; + BUG_ON(dev->ifindex != LOOPBACK_IFINDEX); net->loopback_dev = dev; return 0; diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index 826d961f39f7..d4015aa663e6 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -3,14 +3,17 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2009 Cavium Networks + * Copyright (C) 2009,2011 Cavium, Inc. */ -#include <linux/gfp.h> -#include <linux/init.h> -#include <linux/module.h> #include <linux/platform_device.h> +#include <linux/of_mdio.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/gfp.h> #include <linux/phy.h> +#include <linux/io.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-smix-defs.h> @@ -18,9 +21,17 @@ #define DRV_VERSION "1.0" #define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver" +#define SMI_CMD 0x0 +#define SMI_WR_DAT 0x8 +#define SMI_RD_DAT 0x10 +#define SMI_CLK 0x18 +#define SMI_EN 0x20 + struct octeon_mdiobus { struct mii_bus *mii_bus; - int unit; + u64 register_base; + resource_size_t mdio_phys; + resource_size_t regsize; int phy_irq[PHY_MAX_ADDR]; }; @@ -35,15 +46,15 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64); + cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); do { /* * Wait 1000 clocks so we don't saturate the RSL bus * doing reads. 
*/ - cvmx_wait(1000); - smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit)); + __delay(1000); + smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT); } while (smi_rd.s.pending && --timeout); if (smi_rd.s.val) @@ -62,21 +73,21 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, smi_wr.u64 = 0; smi_wr.s.dat = val; - cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64); + cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64); smi_cmd.u64 = 0; smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64); + cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); do { /* * Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ - cvmx_wait(1000); - smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit)); + __delay(1000); + smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) @@ -88,38 +99,44 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, static int __devinit octeon_mdiobus_probe(struct platform_device *pdev) { struct octeon_mdiobus *bus; + struct resource *res_mem; union cvmx_smix_en smi_en; - int i; int err = -ENOENT; bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); if (!bus) return -ENOMEM; - /* The platform_device id is our unit number. */ - bus->unit = pdev->id; + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (res_mem == NULL) { + dev_err(&pdev->dev, "found no memory resource\n"); + err = -ENXIO; + goto fail; + } + bus->mdio_phys = res_mem->start; + bus->regsize = resource_size(res_mem); + if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize, + res_mem->name)) { + dev_err(&pdev->dev, "request_mem_region failed\n"); + goto fail; + } + bus->register_base = + (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize); bus->mii_bus = mdiobus_alloc(); if (!bus->mii_bus) - goto err; + goto fail; smi_en.u64 = 0; smi_en.s.en = 1; - cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64); - - /* - * Standard Octeon evaluation boards don't support phy - * interrupts, we need to poll. 
- */ - for (i = 0; i < PHY_MAX_ADDR; i++) - bus->phy_irq[i] = PHY_POLL; + cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); bus->mii_bus->priv = bus; bus->mii_bus->irq = bus->phy_irq; bus->mii_bus->name = "mdio-octeon"; - snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - bus->mii_bus->name, bus->unit); + snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base); bus->mii_bus->parent = &pdev->dev; bus->mii_bus->read = octeon_mdiobus_read; @@ -127,20 +144,18 @@ static int __devinit octeon_mdiobus_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, bus); - err = mdiobus_register(bus->mii_bus); + err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node); if (err) - goto err_register; + goto fail_register; dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); return 0; -err_register: +fail_register: mdiobus_free(bus->mii_bus); - -err: - devm_kfree(&pdev->dev, bus); +fail: smi_en.u64 = 0; - cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64); + cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); return err; } @@ -154,14 +169,23 @@ static int __devexit octeon_mdiobus_remove(struct platform_device *pdev) mdiobus_unregister(bus->mii_bus); mdiobus_free(bus->mii_bus); smi_en.u64 = 0; - cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64); + cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); return 0; } +static struct of_device_id octeon_mdiobus_match[] = { + { + .compatible = "cavium,octeon-3860-mdio", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, octeon_mdiobus_match); + static struct platform_driver octeon_mdiobus_driver = { .driver = { .name = "mdio-octeon", .owner = THIS_MODULE, + .of_match_table = octeon_mdiobus_match, }, .probe = octeon_mdiobus_probe, .remove = __devexit_p(octeon_mdiobus_remove), diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 5c0557222f20..eb3f5cefeba3 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -94,6 +94,18 @@ struct ppp_file { #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) /* + * Data structure to hold primary network stats for which + * we want to use 64 bit storage. Other network stats + * are stored in dev->stats of the ppp strucute. + */ +struct ppp_link_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; +}; + +/* * Data structure describing one ppp unit. * A ppp unit corresponds to a ppp network interface device * and represents a multilink bundle. 
@@ -136,6 +148,7 @@ struct ppp { unsigned pass_len, active_len; #endif /* CONFIG_PPP_FILTER */ struct net *ppp_net; /* the net we belong to */ + struct ppp_link_stats stats64; /* 64 bit network stats */ }; /* @@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } +struct rtnl_link_stats64* +ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) +{ + struct ppp *ppp = netdev_priv(dev); + + ppp_recv_lock(ppp); + stats64->rx_packets = ppp->stats64.rx_packets; + stats64->rx_bytes = ppp->stats64.rx_bytes; + ppp_recv_unlock(ppp); + + ppp_xmit_lock(ppp); + stats64->tx_packets = ppp->stats64.tx_packets; + stats64->tx_bytes = ppp->stats64.tx_bytes; + ppp_xmit_unlock(ppp); + + stats64->rx_errors = dev->stats.rx_errors; + stats64->tx_errors = dev->stats.tx_errors; + stats64->rx_dropped = dev->stats.rx_dropped; + stats64->tx_dropped = dev->stats.tx_dropped; + stats64->rx_length_errors = dev->stats.rx_length_errors; + + return stats64; +} + static const struct net_device_ops ppp_netdev_ops = { - .ndo_start_xmit = ppp_start_xmit, - .ndo_do_ioctl = ppp_net_ioctl, + .ndo_start_xmit = ppp_start_xmit, + .ndo_do_ioctl = ppp_net_ioctl, + .ndo_get_stats64 = ppp_get_stats64, }; static void ppp_setup(struct net_device *dev) @@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) #endif /* CONFIG_PPP_FILTER */ } - ++ppp->dev->stats.tx_packets; - ppp->dev->stats.tx_bytes += skb->len - 2; + ++ppp->stats64.tx_packets; + ppp->stats64.tx_bytes += skb->len - 2; switch (proto) { case PPP_IP: @@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) break; } - ++ppp->dev->stats.rx_packets; - ppp->dev->stats.rx_bytes += skb->len - 2; + ++ppp->stats64.rx_packets; + ppp->stats64.rx_bytes += skb->len - 2; npi = proto_to_npindex(proto); if (npi < 0) { @@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) struct slcompress *vj = ppp->vj; memset(st, 0, sizeof(*st)); - st->p.ppp_ipackets = ppp->dev->stats.rx_packets; + st->p.ppp_ipackets = ppp->stats64.rx_packets; st->p.ppp_ierrors = ppp->dev->stats.rx_errors; - st->p.ppp_ibytes = ppp->dev->stats.rx_bytes; - st->p.ppp_opackets = ppp->dev->stats.tx_packets; + st->p.ppp_ibytes = ppp->stats64.rx_bytes; + st->p.ppp_opackets = ppp->stats64.tx_packets; st->p.ppp_oerrors = ppp->dev->stats.tx_errors; - st->p.ppp_obytes = ppp->dev->stats.tx_bytes; + st->p.ppp_obytes = ppp->stats64.tx_bytes; if (!vj) return; st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 87707ab39430..ba10c469b02b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) } +/************************************* + * Multiqueue Tx port select override + *************************************/ + +static int team_queue_override_init(struct team *team) +{ + struct list_head *listarr; + unsigned int queue_cnt = team->dev->num_tx_queues - 1; + unsigned int i; + + if (!queue_cnt) + return 0; + listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL); + if (!listarr) + return -ENOMEM; + team->qom_lists = listarr; + for (i = 0; i < queue_cnt; i++) + INIT_LIST_HEAD(listarr++); + return 0; +} + +static void team_queue_override_fini(struct team *team) +{ + kfree(team->qom_lists); +} + +static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id) +{ + return &team->qom_lists[queue_id - 1]; 
+} + +/* + * note: already called with rcu_read_lock + */ +static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb) +{ + struct list_head *qom_list; + struct team_port *port; + + if (!team->queue_override_enabled || !skb->queue_mapping) + return false; + qom_list = __team_get_qom_list(team, skb->queue_mapping); + list_for_each_entry_rcu(port, qom_list, qom_list) { + if (!team_dev_queue_xmit(team, port, skb)) + return true; + } + return false; +} + +static void __team_queue_override_port_del(struct team *team, + struct team_port *port) +{ + list_del_rcu(&port->qom_list); + synchronize_rcu(); + INIT_LIST_HEAD(&port->qom_list); +} + +static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, + struct team_port *cur) +{ + if (port->priority < cur->priority) + return true; + if (port->priority > cur->priority) + return false; + if (port->index < cur->index) + return true; + return false; +} + +static void __team_queue_override_port_add(struct team *team, + struct team_port *port) +{ + struct team_port *cur; + struct list_head *qom_list; + struct list_head *node; + + if (!port->queue_id || !team_port_enabled(port)) + return; + + qom_list = __team_get_qom_list(team, port->queue_id); + node = qom_list; + list_for_each_entry(cur, qom_list, qom_list) { + if (team_queue_override_port_has_gt_prio_than(port, cur)) + break; + node = &cur->qom_list; + } + list_add_tail_rcu(&port->qom_list, node); +} + +static void __team_queue_override_enabled_check(struct team *team) +{ + struct team_port *port; + bool enabled = false; + + list_for_each_entry(port, &team->port_list, list) { + if (!list_empty(&port->qom_list)) { + enabled = true; + break; + } + } + if (enabled == team->queue_override_enabled) + return; + netdev_dbg(team->dev, "%s queue override\n", + enabled ? "Enabling" : "Disabling"); + team->queue_override_enabled = enabled; +} + +static void team_queue_override_port_refresh(struct team *team, + struct team_port *port) +{ + __team_queue_override_port_del(team, port); + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); +} + + /**************** * Port handling ****************/ @@ -688,6 +804,7 @@ static void team_port_enable(struct team *team, hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); + team_queue_override_port_refresh(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); } @@ -716,6 +833,7 @@ static void team_port_disable(struct team *team, hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; + team_queue_override_port_refresh(team, port); __team_adjust_ops(team, team->en_port_count - 1); /* * Wait until readers see adjusted ops. 
This ensures that @@ -881,6 +999,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) port->dev = port_dev; port->team = team; + INIT_LIST_HEAD(&port->qom_list); port->orig.mtu = port_dev->mtu; err = dev_set_mtu(port_dev, dev->mtu); @@ -1092,6 +1211,49 @@ static int team_user_linkup_en_option_set(struct team *team, return 0; } +static int team_priority_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.s32_val = port->priority; + return 0; +} + +static int team_priority_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + port->priority = ctx->data.s32_val; + team_queue_override_port_refresh(team, port); + return 0; +} + +static int team_queue_id_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.u32_val = port->queue_id; + return 0; +} + +static int team_queue_id_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + if (port->queue_id == ctx->data.u32_val) + return 0; + if (ctx->data.u32_val >= team->dev->real_num_tx_queues) + return -EINVAL; + port->queue_id = ctx->data.u32_val; + team_queue_override_port_refresh(team, port); + return 0; +} + + static const struct team_option team_options[] = { { .name = "mode", @@ -1120,6 +1282,20 @@ static const struct team_option team_options[] = { .getter = team_user_linkup_en_option_get, .setter = team_user_linkup_en_option_set, }, + { + .name = "priority", + .type = TEAM_OPTION_TYPE_S32, + .per_port = true, + .getter = team_priority_option_get, + .setter = team_priority_option_set, + }, + { + .name = "queue_id", + .type = TEAM_OPTION_TYPE_U32, + .per_port = true, + .getter = team_queue_id_option_get, + .setter = team_queue_id_option_set, + }, }; static struct lock_class_key team_netdev_xmit_lock_key; @@ -1155,6 +1331,9 @@ static int team_init(struct net_device *dev) for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) INIT_HLIST_HEAD(&team->en_port_hlist[i]); INIT_LIST_HEAD(&team->port_list); + err = team_queue_override_init(team); + if (err) + goto err_team_queue_override_init; team_adjust_ops(team); @@ -1170,6 +1349,8 @@ static int team_init(struct net_device *dev) return 0; err_options_register: + team_queue_override_fini(team); +err_team_queue_override_init: free_percpu(team->pcpu_stats); return err; @@ -1187,6 +1368,7 @@ static void team_uninit(struct net_device *dev) __team_change_mode(team, NULL); /* cleanup */ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); + team_queue_override_fini(team); mutex_unlock(&team->lock); } @@ -1216,10 +1398,12 @@ static int team_close(struct net_device *dev) static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) { struct team *team = netdev_priv(dev); - bool tx_success = false; + bool tx_success; unsigned int len = skb->len; - tx_success = team->ops.transmit(team, skb); + tx_success = team_queue_override_transmit(team, skb); + if (!tx_success) + tx_success = team->ops.transmit(team, skb); if (tx_success) { struct team_pcpu_stats *pcpu_stats; @@ -1787,6 +1971,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) goto nest_cancel; break; + case TEAM_OPTION_TYPE_S32: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32)) + goto nest_cancel; + if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val)) + goto nest_cancel; + 
break; default: BUG(); } @@ -1975,6 +2165,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) case NLA_FLAG: opt_type = TEAM_OPTION_TYPE_BOOL; break; + case NLA_S32: + opt_type = TEAM_OPTION_TYPE_S32; + break; default: goto team_put; } @@ -2031,6 +2224,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) case TEAM_OPTION_TYPE_BOOL: ctx.data.bool_val = attr_data ? true : false; break; + case TEAM_OPTION_TYPE_S32: + ctx.data.s32_val = nla_get_s32(attr_data); + break; default: BUG(); } diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 187c144c5e5b..64610048ce87 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) struct page *page; int err; - page = alloc_page(gfp_flags); + page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL); if (!page) return -ENOMEM; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f4ce5957df32..4cd582a4f625 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Dell branded MBM devices like DW5550 */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x413c, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + + /* Toshiba branded MBM devices */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x0930, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 5852361032c4..e522ff70444c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (tbp[IFLA_ADDRESS] == NULL) eth_hw_addr_random(peer); + if (ifmp && (dev->ifindex != 0)) + peer->ifindex = ifmp->ifi_index; + err = register_netdevice(peer); put_net(net); net = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 83d2b0c34c5e..81a64c58e8ad 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work) goto done; if (v & VIRTIO_NET_S_ANNOUNCE) { - netif_notify_peers(vi->dev); + netdev_notify_peers(vi->dev); virtnet_ack_link_announce(vi); } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index cfa91ab7acf8..60b6a9daff7e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah) case AR9300_DEVID_QCA955X: case AR9300_DEVID_AR9580: case AR9300_DEVID_AR9462: + case AR9485_DEVID_AR1111: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index dd0c146d81dc..ce7332c64efb 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -49,6 +49,7 @@ #define AR9300_DEVID_AR9462 0x0034 #define 
AR9300_DEVID_AR9330 0x0035 #define AR9300_DEVID_QCA955X 0x0038 +#define AR9485_DEVID_AR1111 0x0037 #define AR5416_AR9100_DEVID 0x000b diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 87b89d55e637..d455de9162ec 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ + { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ { 0 } }; diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 5efa77884704..d97a95b1addb 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev) if (dev->dev->chip_id == 0x4301) { mask |= 0x0060; set |= 0x0060; + } else if (dev->dev->chip_id == 0x5354) { + /* Don't allow overtaking buttons GPIOs */ + set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */ } - if (dev->dev->chip_id == 0x5354) - set &= 0xff02; + if (0 /* FIXME: conditional unknown */ ) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0100); - mask |= 0x0180; - set |= 0x0180; + /* BT Coexistance Input */ + mask |= 0x0080; + set |= 0x0080; + /* BT Coexistance Out */ + mask |= 0x0100; + set |= 0x0100; } if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) { + /* PA is controlled by gpio 9, let ucode handle it */ b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } - if (dev->dev->core_rev >= 2) - mask |= 0x0010; /* FIXME: This is redundant. */ switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc, - BCMA_CC_GPIOCTL) & mask) | set); + BCMA_CC_GPIOCTL) & ~mask) | set); break; #endif #ifdef CONFIG_B43_SSB @@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev) if (gpiodev) ssb_write32(gpiodev, B43_GPIO_CONTROL, (ssb_read32(gpiodev, B43_GPIO_CONTROL) - & mask) | set); + & ~mask) | set); break; #endif } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 57bf1d7ee80f..9ab24528f9b9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -1188,7 +1188,7 @@ exit: kfree(buf); /* close file before return */ if (fp) - filp_close(fp, current->files); + filp_close(fp, NULL); /* restore previous address limit */ set_fs(old_fs); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 9a4c63f927cb..7ed7d7577024 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; - const struct ieee80211_reg_rule *reg_rule; struct txpwr_limits txpwr; - int ret; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); @@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, ); /* set or restore gmode as required by regulatory */ - ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, 
&reg_rule); - if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM)) + if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 683a8652e155..1c70defba6c3 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -123,7 +123,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = { IEEE80211_CHAN_NO_HT40PLUS), CHAN2GHZ(14, 2484, IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS | - IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) + IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS | + IEEE80211_CHAN_NO_OFDM) }; static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = { diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index eb5de800ed90..1c10b542ab23 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv, netif_tx_wake_all_queues(priv->dev); } + kfree(cmd); done: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 76caebaa4397..e970897f6ab5 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func) kfree(packet); } + kfree(card); lbs_deb_leave(LBS_DEB_SDIO); } diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 58048189bd24..fe1ea43c5149 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -571,7 +571,10 @@ static int lbs_thread(void *data) netdev_info(dev, "Timeout submitting command 0x%04x\n", le16_to_cpu(cmdnode->cmdbuf->command)); lbs_complete_command(priv, cmdnode, -ETIMEDOUT); - if (priv->reset_card) + + /* Reset card, but only when it isn't in the process + * of being shutdown anyway. */ + if (!dev->dismantle && priv->reset_card) priv->reset_card(priv); } priv->cmd_timed_out = 0; diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 88455b1b9fe0..cb8c2aca54e4 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, mutex_unlock(&rt2x00dev->csr_mutex); } +static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + int i, count; + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + if (rt2x00_get_field32(reg, WLAN_EN)) + return 0; + + rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); + rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); + rt2x00_set_field32(&reg, WLAN_EN, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + + udelay(REGISTER_BUSY_DELAY); + + count = 0; + do { + /* + * Check PLL_LD & XTAL_RDY. 
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); + if (rt2x00_get_field32(reg, PLL_LD) && + rt2x00_get_field32(reg, XTAL_RDY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + if (i >= REGISTER_BUSY_COUNT) { + + if (count >= 10) + return -EIO; + + rt2800_register_write(rt2x00dev, 0x58, 0x018); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x418); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x618); + udelay(REGISTER_BUSY_DELAY); + count++; + } else { + count = 0; + } + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); + rt2x00_set_field32(&reg, WLAN_RESET, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2x00_set_field32(&reg, WLAN_RESET, 0); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); + } while (count != 0); + + return 0; +} + void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1) @@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, { unsigned int i; u32 reg; + int retval; + + if (rt2x00_rt(rt2x00dev, RT3290)) { + retval = rt2800_enable_wlan_rt3290(rt2x00dev); + if (retval) + return -EBUSY; + } /* * If driver doesn't wake up firmware here, diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 235376e9cb04..98aa426a3564 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) return rt2800_validate_eeprom(rt2x00dev); } -static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) -{ - u32 reg; - int i, count; - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - if (rt2x00_get_field32(reg, WLAN_EN)) - return 0; - - rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); - rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); - rt2x00_set_field32(&reg, WLAN_EN, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - - udelay(REGISTER_BUSY_DELAY); - - count = 0; - do { - /* - * Check PLL_LD & XTAL_RDY. 
- */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); - if (rt2x00_get_field32(reg, PLL_LD) && - rt2x00_get_field32(reg, XTAL_RDY)) - break; - udelay(REGISTER_BUSY_DELAY); - } - - if (i >= REGISTER_BUSY_COUNT) { - - if (count >= 10) - return -EIO; - - rt2800_register_write(rt2x00dev, 0x58, 0x018); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x418); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x618); - udelay(REGISTER_BUSY_DELAY); - count++; - } else { - count = 0; - } - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); - rt2x00_set_field32(&reg, WLAN_RESET, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2x00_set_field32(&reg, WLAN_RESET, 0); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); - } while (count != 0); - - return 0; -} static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; @@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* - * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan - * clk for rt3290. That avoid the MCU fail in start phase. - */ - if (rt2x00_rt(rt2x00dev, RT3290)) { - retval = rt2800_enable_wlan_rt3290(rt2x00dev); - - if (retval) - return retval; - } - - /* * This device has multiple filters for control frames * and has a separate filter for PS Poll frames. */ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 30899901aef5..39afd37e62b3 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1731,7 +1731,7 @@ static void netback_changed(struct xenbus_device *dev, break; case XenbusStateConnected: - netif_notify_peers(netdev); + netdev_notify_peers(netdev); break; case XenbusStateClosing: