Diffstat (limited to 'drivers/net/ethernet/mediatek')
-rw-r--r-- | drivers/net/ethernet/mediatek/Makefile | 2
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_eth_soc.c | 504
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_eth_soc.h | 59
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_ppe.c | 22
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_ppe.h | 4
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 12
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_sgmii.c | 174
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed.c | 863
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed.h | 21
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed_debugfs.c | 87
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed_mcu.c | 390
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed_regs.h | 140
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed_wo.c | 512
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_wed_wo.h | 282
14 files changed, 2765 insertions, 307 deletions
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 45ba0970504a..8e0c61c33ff8 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o -mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o ifdef CONFIG_DEBUG_FS mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o endif diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 1d36619c5ec9..e3de9a53b2d9 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -23,6 +23,7 @@ #include <linux/jhash.h> #include <linux/bitfield.h> #include <net/dsa.h> +#include <net/dst_metadata.h> #include "mtk_eth_soc.h" #include "mtk_wed.h" @@ -54,6 +55,7 @@ static const struct mtk_reg_map mtk_reg_map = { }, .qdma = { .qtx_cfg = 0x1800, + .qtx_sch = 0x1804, .rx_ptr = 0x1900, .rx_cnt_cfg = 0x1904, .qcrx_ptr = 0x1908, @@ -61,6 +63,7 @@ static const struct mtk_reg_map mtk_reg_map = { .rst_idx = 0x1a08, .delay_irq = 0x1a0c, .fc_th = 0x1a10, + .tx_sch_rate = 0x1a14, .int_grp = 0x1a20, .hred = 0x1a44, .ctx_ptr = 0x1b00, @@ -113,6 +116,7 @@ static const struct mtk_reg_map mt7986_reg_map = { }, .qdma = { .qtx_cfg = 0x4400, + .qtx_sch = 0x4404, .rx_ptr = 0x4500, .rx_cnt_cfg = 0x4504, .qcrx_ptr = 0x4508, @@ -130,6 +134,7 @@ static const struct mtk_reg_map mt7986_reg_map = { .fq_tail = 0x4724, .fq_count = 0x4728, .fq_blen = 0x472c, + .tx_sch_rate = 0x4798, }, .gdm1_cnt = 0x1c00, .gdma_to_ppe = 0x3333, @@ -613,6 +618,75 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } +static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, + int speed) +{ + const struct mtk_soc_data *soc = eth->soc; + u32 ofs, val; + + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) + return; + + val = MTK_QTX_SCH_MIN_RATE_EN | + /* minimum: 10 Mbps */ + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) | + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) | + MTK_QTX_SCH_LEAKY_BUCKET_SIZE; + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + val |= MTK_QTX_SCH_LEAKY_BUCKET_EN; + + if (IS_ENABLED(CONFIG_SOC_MT7621)) { + switch (speed) { + case SPEED_10: + val |= MTK_QTX_SCH_MAX_RATE_EN | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); + break; + case SPEED_100: + val |= MTK_QTX_SCH_MAX_RATE_EN | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3); + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); + break; + case SPEED_1000: + val |= MTK_QTX_SCH_MAX_RATE_EN | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10); + break; + default: + break; + } + } else { + switch (speed) { + case SPEED_10: + val |= MTK_QTX_SCH_MAX_RATE_EN | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); + break; + case SPEED_100: + val |= MTK_QTX_SCH_MAX_RATE_EN | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5); + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1); + break; + case SPEED_1000: + val |= MTK_QTX_SCH_MAX_RATE_EN 
| + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) | + FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10); + break; + default: + break; + } + } + + ofs = MTK_QTX_OFFSET * idx; + mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); +} + static void mtk_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, @@ -638,6 +712,8 @@ static void mtk_mac_link_up(struct phylink_config *config, break; } + mtk_set_queue_speed(mac->hw, mac->id, speed); + /* Configure duplex */ if (duplex == DUPLEX_FULL) mcr |= MAC_MCR_FORCE_DPX; @@ -653,7 +729,6 @@ static void mtk_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops mtk_phylink_ops = { - .validate = phylink_generic_validate, .mac_select_pcs = mtk_mac_select_pcs, .mac_pcs_get_state = mtk_mac_pcs_get_state, .mac_config = mtk_mac_config, @@ -865,7 +940,7 @@ static void mtk_get_stats64(struct net_device *dev, } do { - start = u64_stats_fetch_begin_irq(&hw_stats->syncp); + start = u64_stats_fetch_begin(&hw_stats->syncp); storage->rx_packets = hw_stats->rx_packets; storage->tx_packets = hw_stats->tx_packets; storage->rx_bytes = hw_stats->rx_bytes; @@ -877,7 +952,7 @@ static void mtk_get_stats64(struct net_device *dev, storage->rx_crc_errors = hw_stats->rx_fcs_errors; storage->rx_errors = hw_stats->rx_checksum_errors; storage->tx_aborted_errors = hw_stats->tx_skip; - } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); + } while (u64_stats_fetch_retry(&hw_stats->syncp, start)); storage->tx_errors = dev->stats.tx_errors; storage->rx_dropped = dev->stats.rx_dropped; @@ -938,7 +1013,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; dma_addr_t phy_ring_tail; - int cnt = MTK_DMA_SIZE; + int cnt = MTK_QDMA_RING_SIZE; dma_addr_t dma_addr; int i; @@ -1099,7 +1174,8 @@ static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd, WRITE_ONCE(desc->txd1, info->addr); - data = TX_DMA_SWC | TX_DMA_PLEN0(info->size); + data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) | + FIELD_PREP(TX_DMA_PQID, info->qid); if (info->last) data |= TX_DMA_LS0; WRITE_ONCE(desc->txd3, data); @@ -1133,9 +1209,6 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd, data |= TX_DMA_LS0; WRITE_ONCE(desc->txd3, data); - if (!info->qid && mac->id) - info->qid = MTK_QDMA_GMAC2_QID; - data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); WRITE_ONCE(desc->txd4, data); @@ -1179,11 +1252,12 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, .gso = gso, .csum = skb->ip_summed == CHECKSUM_PARTIAL, .vlan = skb_vlan_tag_present(skb), - .qid = skb->mark & MTK_QDMA_TX_MASK, + .qid = skb_get_queue_mapping(skb), .vlan_tci = skb_vlan_tag_get(skb), .first = true, .last = !skb_is_nonlinear(skb), }; + struct netdev_queue *txq; struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; const struct mtk_soc_data *soc = eth->soc; @@ -1191,8 +1265,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, struct mtk_tx_dma *itxd_pdma, *txd_pdma; struct mtk_tx_buf *itx_buf, *tx_buf; int i, n_desc = 1; + int queue = skb_get_queue_mapping(skb); int k = 0; + txq = netdev_get_tx_queue(dev, queue); itxd = ring->next_free; itxd_pdma = qdma_to_pdma(ring, itxd); if (itxd == ring->last_free) @@ -1241,7 +1317,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, memset(&txd_info, 0, sizeof(struct 
mtk_tx_dma_desc_info)); txd_info.size = min_t(unsigned int, frag_size, soc->txrx.dma_max_len); - txd_info.qid = skb->mark & MTK_QDMA_TX_MASK; + txd_info.qid = queue; txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && !(frag_size - txd_info.size); txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, @@ -1280,7 +1356,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, txd_pdma->txd2 |= TX_DMA_LS1; } - netdev_sent_queue(dev, skb->len); + netdev_tx_sent_queue(txq, skb->len); skb_tx_timestamp(skb); ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); @@ -1292,8 +1368,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, wmb(); if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || - !netdev_xmit_more()) + if (netif_xmit_stopped(txq) || !netdev_xmit_more()) mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); } else { int next_idx; @@ -1362,7 +1437,7 @@ static void mtk_wake_queue(struct mtk_eth *eth) for (i = 0; i < MTK_MAC_COUNT; i++) { if (!eth->netdev[i]) continue; - netif_wake_queue(eth->netdev[i]); + netif_tx_wake_all_queues(eth->netdev[i]); } } @@ -1386,7 +1461,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_num = mtk_cal_txd_req(eth, skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); spin_unlock(ð->page_lock); @@ -1412,7 +1487,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) - netif_stop_queue(dev); + netif_tx_stop_all_queues(dev); spin_unlock(ð->page_lock); @@ -1579,10 +1654,12 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_mac *mac = netdev_priv(dev); struct mtk_tx_dma_desc_info txd_info = { .size = xdpf->len, .first = true, .last = !xdp_frame_has_frags(xdpf), + .qid = mac->id, }; int err, index = 0, n_desc = 1, nr_frags; struct mtk_tx_buf *htx_buf, *tx_buf; @@ -1632,6 +1709,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); txd_info.size = skb_frag_size(&sinfo->frags[index]); txd_info.last = index + 1 == nr_frags; + txd_info.qid = mac->id; data = skb_frag_address(&sinfo->frags[index]); index++; @@ -1937,16 +2015,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, htons(RX_DMA_VPID(trxd.rxd4)), RX_DMA_VID(trxd.rxd4)); } else if (trxd.rxd2 & RX_DMA_VTAG) { - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)), RX_DMA_VID(trxd.rxd3)); } + } + + /* When using VLAN untagging in combination with DSA, the + * hardware treats the MTK special tag as a VLAN and untags it. + */ + if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) { + unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0); + + if (port < ARRAY_SIZE(eth->dsa_meta) && + eth->dsa_meta[port]) + skb_dst_set_noref(skb, ð->dsa_meta[port]->dst); - /* If the device is attached to a dsa switch, the special - * tag inserted in VLAN field by hw switch can * be offloaded - * by RX HW VLAN offload. Clear vlan info. 
- */ - if (netdev_uses_dsa(netdev)) - __vlan_hwaccel_clear_tag(skb); + __vlan_hwaccel_clear_tag(skb); } skb_record_rx_queue(skb, 0); @@ -1986,8 +2070,46 @@ rx_done: return done; } +struct mtk_poll_state { + struct netdev_queue *txq; + unsigned int total; + unsigned int done; + unsigned int bytes; +}; + +static void +mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, + struct sk_buff *skb) +{ + struct netdev_queue *txq; + struct net_device *dev; + unsigned int bytes = skb->len; + + state->total++; + eth->tx_packets++; + eth->tx_bytes += bytes; + + dev = eth->netdev[mac]; + if (!dev) + return; + + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + if (state->txq == txq) { + state->done++; + state->bytes += bytes; + return; + } + + if (state->txq) + netdev_tx_completed_queue(state->txq, state->done, state->bytes); + + state->txq = txq; + state->done = 1; + state->bytes = bytes; +} + static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, - unsigned int *done, unsigned int *bytes) + struct mtk_poll_state *state) { const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct mtk_tx_ring *ring = ð->tx_ring; @@ -2019,12 +2141,9 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, break; if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { - if (tx_buf->type == MTK_TYPE_SKB) { - struct sk_buff *skb = tx_buf->data; + if (tx_buf->type == MTK_TYPE_SKB) + mtk_poll_tx_done(eth, state, mac, tx_buf->data); - bytes[mac] += skb->len; - done[mac]++; - } budget--; } mtk_tx_unmap(eth, tx_buf, &bq, true); @@ -2043,7 +2162,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, } static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, - unsigned int *done, unsigned int *bytes) + struct mtk_poll_state *state) { struct mtk_tx_ring *ring = ð->tx_ring; struct mtk_tx_buf *tx_buf; @@ -2061,12 +2180,8 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, break; if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { - if (tx_buf->type == MTK_TYPE_SKB) { - struct sk_buff *skb = tx_buf->data; - - bytes[0] += skb->len; - done[0]++; - } + if (tx_buf->type == MTK_TYPE_SKB) + mtk_poll_tx_done(eth, state, 0, tx_buf->data); budget--; } mtk_tx_unmap(eth, tx_buf, &bq, true); @@ -2088,26 +2203,15 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) { struct mtk_tx_ring *ring = ð->tx_ring; struct dim_sample dim_sample = {}; - unsigned int done[MTK_MAX_DEVS]; - unsigned int bytes[MTK_MAX_DEVS]; - int total = 0, i; - - memset(done, 0, sizeof(done)); - memset(bytes, 0, sizeof(bytes)); + struct mtk_poll_state state = {}; if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) - budget = mtk_poll_tx_qdma(eth, budget, done, bytes); + budget = mtk_poll_tx_qdma(eth, budget, &state); else - budget = mtk_poll_tx_pdma(eth, budget, done, bytes); + budget = mtk_poll_tx_pdma(eth, budget, &state); - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->netdev[i] || !done[i]) - continue; - netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); - total += done[i]; - eth->tx_packets += done[i]; - eth->tx_bytes += bytes[i]; - } + if (state.txq) + netdev_tx_completed_queue(state.txq, state.done, state.bytes); dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, &dim_sample); @@ -2117,7 +2221,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) (atomic_read(&ring->free_count) > ring->thresh)) mtk_wake_queue(eth); - return total; + return state.total; } static void mtk_handle_status_irq(struct mtk_eth *eth) @@ -2202,19 +2306,26 @@ static int mtk_tx_alloc(struct mtk_eth 
*eth) struct mtk_tx_ring *ring = ð->tx_ring; int i, sz = soc->txrx.txd_size; struct mtk_tx_dma_v2 *txd; + int ring_size; + u32 ofs, val; + + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) + ring_size = MTK_QDMA_RING_SIZE; + else + ring_size = MTK_DMA_SIZE; - ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), + ring->buf = kcalloc(ring_size, sizeof(*ring->buf), GFP_KERNEL); if (!ring->buf) goto no_tx_mem; - ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, &ring->phys, GFP_KERNEL); if (!ring->dma) goto no_tx_mem; - for (i = 0; i < MTK_DMA_SIZE; i++) { - int next = (i + 1) % MTK_DMA_SIZE; + for (i = 0; i < ring_size; i++) { + int next = (i + 1) % ring_size; u32 next_ptr = ring->phys + next * sz; txd = ring->dma + i * sz; @@ -2234,22 +2345,22 @@ static int mtk_tx_alloc(struct mtk_eth *eth) * descriptors in ring->dma_pdma. */ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, &ring->phys_pdma, GFP_KERNEL); if (!ring->dma_pdma) goto no_tx_mem; - for (i = 0; i < MTK_DMA_SIZE; i++) { + for (i = 0; i < ring_size; i++) { ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; ring->dma_pdma[i].txd4 = 0; } } - ring->dma_size = MTK_DMA_SIZE; - atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); + ring->dma_size = ring_size; + atomic_set(&ring->free_count, ring_size - 2); ring->next_free = ring->dma; ring->last_free = (void *)txd; - ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); + ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz)); ring->thresh = MAX_SKB_FRAGS; /* make sure that all changes to the dma ring are flushed before we @@ -2261,14 +2372,31 @@ static int mtk_tx_alloc(struct mtk_eth *eth) mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); mtk_w32(eth, - ring->phys + ((MTK_DMA_SIZE - 1) * sz), + ring->phys + ((ring_size - 1) * sz), soc->reg_map->qdma.crx_ptr); mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, - soc->reg_map->qdma.qtx_cfg); + + for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) { + val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES; + mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); + + val = MTK_QTX_SCH_MIN_RATE_EN | + /* minimum: 10 Mbps */ + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) | + FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) | + MTK_QTX_SCH_LEAKY_BUCKET_SIZE; + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + val |= MTK_QTX_SCH_LEAKY_BUCKET_EN; + mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); + ofs += MTK_QTX_OFFSET; + } + val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16); + mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); } else { mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); - mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); + mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); } @@ -2286,7 +2414,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) int i; if (ring->buf) { - for (i = 0; i < MTK_DMA_SIZE; i++) + for (i = 0; i < ring->dma_size; i++) mtk_tx_unmap(eth, &ring->buf[i], NULL, false); kfree(ring->buf); ring->buf = NULL; @@ -2294,14 +2422,14 @@ static void mtk_tx_clean(struct mtk_eth *eth) if (ring->dma) { 
dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * soc->txrx.txd_size, + ring->dma_size * soc->txrx.txd_size, ring->dma, ring->phys); ring->dma = NULL; } if (ring->dma_pdma) { dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * soc->txrx.txd_size, + ring->dma_size * soc->txrx.txd_size, ring->dma_pdma, ring->phys_pdma); ring->dma_pdma = NULL; } @@ -2727,15 +2855,30 @@ static netdev_features_t mtk_fix_features(struct net_device *dev, static int mtk_set_features(struct net_device *dev, netdev_features_t features) { - int err = 0; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + netdev_features_t diff = dev->features ^ features; + int i; - if (!((dev->features ^ features) & NETIF_F_LRO)) + if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO)) + mtk_hwlro_netdev_disable(dev); + + /* Set RX VLAN offloading */ + if (!(diff & NETIF_F_HW_VLAN_CTAG_RX)) return 0; - if (!(features & NETIF_F_LRO)) - mtk_hwlro_netdev_disable(dev); + mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX), + MTK_CDMP_EG_CTRL); - return err; + /* sync features with other MAC */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i] || eth->netdev[i] == dev) + continue; + eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX; + } + + return 0; } /* wait for DMA to finish whatever it is doing before we start using it again */ @@ -2823,7 +2966,7 @@ static void mtk_dma_free(struct mtk_eth *eth) netdev_reset_queue(eth->netdev[i]); if (eth->scratch_ring) { dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * soc->txrx.txd_size, + MTK_QDMA_RING_SIZE * soc->txrx.txd_size, eth->scratch_ring, eth->phy_scratch_ring); eth->scratch_ring = NULL; eth->phy_scratch_ring = 0; @@ -2932,7 +3075,7 @@ static int mtk_start_dma(struct mtk_eth *eth) if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) val |= MTK_MUTLI_CNT | MTK_RESV_BUF | MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | - MTK_CHK_DDONE_EN; + MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN; else val |= MTK_RX_BT_32DWORDS; mtk_w32(eth, val, reg_map->qdma.glo_cfg); @@ -2978,11 +3121,85 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config) mtk_w32(eth, 0, MTK_RST_GL); } + +static bool mtk_uses_dsa(struct net_device *dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + return netdev_uses_dsa(dev) && + dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK; +#else + return false; +#endif +} + +static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr) +{ + struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier); + struct mtk_eth *eth = mac->hw; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ethtool_link_ksettings s; + struct net_device *ldev; + struct list_head *iter; + struct dsa_port *dp; + + if (event != NETDEV_CHANGE) + return NOTIFY_DONE; + + netdev_for_each_lower_dev(dev, ldev, iter) { + if (netdev_priv(ldev) == mac) + goto found; + } + + return NOTIFY_DONE; + +found: + if (!dsa_slave_dev_check(dev)) + return NOTIFY_DONE; + + if (__ethtool_get_link_ksettings(dev, &s)) + return NOTIFY_DONE; + + if (s.base.speed == 0 || s.base.speed == ((__u32)-1)) + return NOTIFY_DONE; + + dp = dsa_port_from_netdev(dev); + if (dp->index >= MTK_QDMA_NUM_QUEUES) + return NOTIFY_DONE; + + mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); + + return NOTIFY_DONE; +} + static int mtk_open(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - int err; + int i, err; + + if (mtk_uses_dsa(dev) && !eth->prog) { + for (i = 0; i < 
ARRAY_SIZE(eth->dsa_meta); i++) { + struct metadata_dst *md_dst = eth->dsa_meta[i]; + + if (md_dst) + continue; + + md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!md_dst) + return -ENOMEM; + + md_dst->u.port_info.port_id = i; + eth->dsa_meta[i] = md_dst; + } + } else { + /* Hardware special tag parsing needs to be disabled if at least + * one MAC does not use DSA. + */ + u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); + val &= ~MTK_CDMP_STAG_EN; + mtk_w32(eth, val, MTK_CDMP_IG_CTRL); + } err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); if (err) { @@ -3020,7 +3237,8 @@ static int mtk_open(struct net_device *dev) refcount_inc(ð->dma_refcnt); phylink_start(mac->phylink); - netif_start_queue(dev); + netif_tx_start_all_queues(dev); + return 0; } @@ -3229,6 +3447,30 @@ static void mtk_dim_tx(struct work_struct *work) dim->state = DIM_START_MEASURE; } +static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val) +{ + struct mtk_eth *eth = mac->hw; + u32 mcr_cur, mcr_new; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + return; + + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; + + if (val <= 1518) + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); + else if (val <= 1536) + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); + else if (val <= 1552) + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); + else + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); + + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); +} + static int mtk_hw_init(struct mtk_eth *eth) { u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | @@ -3268,16 +3510,17 @@ static int mtk_hw_init(struct mtk_eth *eth) return 0; } - val = RSTCTRL_FE | RSTCTRL_PPE; if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); - - val |= RSTCTRL_ETH; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) - val |= RSTCTRL_PPE1; + val = RSTCTRL_PPE0_V2; + } else { + val = RSTCTRL_PPE0; } - ethsys_reset(eth, val); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1; + + ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, @@ -3303,14 +3546,26 @@ static int mtk_hw_init(struct mtk_eth *eth) * up with the more appropriate value when mtk_mac_config call is being * invoked. */ - for (i = 0; i < MTK_MAC_COUNT; i++) + for (i = 0; i < MTK_MAC_COUNT; i++) { + struct net_device *dev = eth->netdev[i]; + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); + if (dev) { + struct mtk_mac *mac = netdev_priv(dev); + + mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN); + } + } /* Indicates CDM to parse the MTK special tag from CPU * which also is working out for untag packets. 
*/ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + val = mtk_r32(eth, MTK_CDMP_IG_CTRL); + mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); + } /* Enable RX VLan Offloading */ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); @@ -3331,9 +3586,12 @@ static int mtk_hw_init(struct mtk_eth *eth) mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { - /* PSE should not drop port8 and port9 packets */ + /* PSE should not drop port8 and port9 packets from WDMA Tx */ mtk_w32(eth, 0x00000300, PSE_DROP_CFG); + /* PSE should drop packets to port 8/9 on WDMA Rx ring full */ + mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); + /* PSE Free Queue Flow Control */ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); @@ -3420,7 +3678,6 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu) int length = new_mtu + MTK_RX_ETH_HLEN; struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - u32 mcr_cur, mcr_new; if (rcu_access_pointer(eth->prog) && length > MTK_PP_MAX_BUF_SIZE) { @@ -3428,23 +3685,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; } - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { - mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; - - if (length <= 1518) - mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); - else if (length <= 1536) - mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); - else if (length <= 1552) - mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); - else - mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); - - if (mcr_new != mcr_cur) - mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); - } - + mtk_set_mcr_max_rx(mac, length); dev->mtu = new_mtu; return 0; @@ -3475,11 +3716,8 @@ static void mtk_pending_work(struct work_struct *work) rtnl_lock(); dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); + set_bit(MTK_RESETTING, ð->state); - while (test_and_set_bit_lock(MTK_RESETTING, ð->state)) - cpu_relax(); - - dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); /* stop all devices to make sure that dma is properly shut down */ for (i = 0; i < MTK_MAC_COUNT; i++) { if (!eth->netdev[i]) @@ -3513,7 +3751,7 @@ static void mtk_pending_work(struct work_struct *work) dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); - clear_bit_unlock(MTK_RESETTING, ð->state); + clear_bit(MTK_RESETTING, ð->state); rtnl_unlock(); } @@ -3528,6 +3766,12 @@ static int mtk_free_dev(struct mtk_eth *eth) free_netdev(eth->netdev[i]); } + for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { + if (!eth->dsa_meta[i]) + break; + metadata_dst_free(eth->dsa_meta[i]); + } + return 0; } @@ -3536,8 +3780,12 @@ static int mtk_unreg_dev(struct mtk_eth *eth) int i; for (i = 0; i < MTK_MAC_COUNT; i++) { + struct mtk_mac *mac; if (!eth->netdev[i]) continue; + mac = netdev_priv(eth->netdev[i]); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + unregister_netdevice_notifier(&mac->device_notifier); unregister_netdev(eth->netdev[i]); } @@ -3688,13 +3936,13 @@ static void mtk_get_ethtool_stats(struct net_device *dev, do { data_dst = data; - start = u64_stats_fetch_begin_irq(&hwstats->syncp); + start = u64_stats_fetch_begin(&hwstats->syncp); for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); if (mtk_page_pool_enabled(mac->hw)) mtk_ethtool_pp_stats(mac->hw, data_dst); - } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); + } while 
(u64_stats_fetch_retry(&hwstats->syncp, start)); } static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, @@ -3753,6 +4001,23 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + unsigned int queue = 0; + + if (netdev_uses_dsa(dev)) + queue = skb_get_queue_mapping(skb) + 3; + else + queue = mac->id; + + if (queue >= dev->num_tx_queues) + queue = 0; + + return queue; +} + static const struct ethtool_ops mtk_ethtool_ops = { .get_link_ksettings = mtk_get_link_ksettings, .set_link_ksettings = mtk_set_link_ksettings, @@ -3788,6 +4053,7 @@ static const struct net_device_ops mtk_netdev_ops = { .ndo_setup_tc = mtk_eth_setup_tc, .ndo_bpf = mtk_xdp, .ndo_xdp_xmit = mtk_xdp_xmit, + .ndo_select_queue = mtk_select_queue, }; static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) @@ -3797,6 +4063,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) struct phylink *phylink; struct mtk_mac *mac; int id, err; + int txqs = 1; if (!_id) { dev_err(eth->dev, "missing mac id\n"); @@ -3814,7 +4081,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) return -EINVAL; } - eth->netdev[id] = alloc_etherdev(sizeof(*mac)); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + txqs = MTK_QDMA_NUM_QUEUES; + + eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); if (!eth->netdev[id]) { dev_err(eth->dev, "alloc_etherdev failed\n"); return -ENOMEM; @@ -3911,6 +4181,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) else eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + mac->device_notifier.notifier_call = mtk_device_event; + register_netdevice_notifier(&mac->device_notifier); + } + return 0; free_netdev: @@ -4242,7 +4517,7 @@ static const struct mtk_soc_data mt7621_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, - .offload_version = 2, + .offload_version = 1, .hash_offset = 2, .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, .txrx = { @@ -4281,7 +4556,7 @@ static const struct mtk_soc_data mt7623_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, - .offload_version = 2, + .offload_version = 1, .hash_offset = 2, .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, .txrx = { @@ -4318,6 +4593,7 @@ static const struct mtk_soc_data mt7986_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7986_CLKS_BITMAP, .required_pctl = false, + .offload_version = 2, .hash_offset = 4, .foe_entry_size = sizeof(struct mtk_foe_entry), .txrx = { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index b52f3b0177ef..18a50529ce7b 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -22,11 +22,16 @@ #include <linux/bpf_trace.h> #include "mtk_ppe.h" +#define MTK_MAX_DSA_PORTS 7 +#define MTK_DSA_PORT_MASK GENMASK(2, 0) + +#define MTK_QDMA_NUM_QUEUES 16 #define MTK_QDMA_PAGE_SIZE 2048 #define MTK_MAX_RX_LENGTH 1536 #define MTK_MAX_RX_LENGTH_2K 2048 #define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_TX_DMA_BUF_LEN_V2 0xffff +#define MTK_QDMA_RING_SIZE 2048 #define MTK_DMA_SIZE 512 #define MTK_MAC_COUNT 2 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) @@ -91,6 +96,9 @@ #define 
MTK_CDMQ_IG_CTRL 0x1400 #define MTK_CDMQ_STAG_EN BIT(0) +/* CDMQ Exgress Control Register */ +#define MTK_CDMQ_EG_CTRL 0x1404 + /* CDMP Ingress Control Register */ #define MTK_CDMP_IG_CTRL 0x400 #define MTK_CDMP_STAG_EN BIT(0) @@ -120,6 +128,7 @@ #define PSE_FQFC_CFG1 0x100 #define PSE_FQFC_CFG2 0x104 #define PSE_DROP_CFG 0x108 +#define PSE_PPE0_DROP 0x110 /* PSE Input Queue Reservation Register*/ #define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2)) @@ -202,8 +211,26 @@ #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3) /* QDMA TX Queue Configuration Registers */ +#define MTK_QTX_OFFSET 0x10 #define QDMA_RES_THRES 4 +/* QDMA Tx Queue Scheduler Configuration Registers */ +#define MTK_QTX_SCH_TX_SEL BIT(31) +#define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30) + +#define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30) +#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28) +#define MTK_QTX_SCH_MIN_RATE_EN BIT(27) +#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20) +#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16) +#define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12) +#define MTK_QTX_SCH_MAX_RATE_EN BIT(11) +#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4) +#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0) + +/* QDMA TX Scheduler Rate Control Register */ +#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15) + /* QDMA Global Configuration Register */ #define MTK_RX_2B_OFFSET BIT(31) #define MTK_RX_BT_32DWORDS (3 << 11) @@ -222,6 +249,7 @@ #define MTK_WCOMP_EN BIT(24) #define MTK_RESV_BUF (0x40 << 16) #define MTK_MUTLI_CNT (0x4 << 12) +#define MTK_LEAKY_BUCKET_EN BIT(11) /* QDMA Flow Control Register */ #define FC_THRES_DROP_MODE BIT(20) @@ -250,8 +278,6 @@ #define MTK_STAT_OFFSET 0x40 /* QDMA TX NUM */ -#define MTK_QDMA_TX_NUM 16 -#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1) #define QID_BITS_V2(x) (((x) & 0x3f) << 16) #define MTK_QDMA_GMAC2_QID 8 @@ -281,6 +307,7 @@ #define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) #define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) #define TX_DMA_SWC BIT(14) +#define TX_DMA_PQID GENMASK(3, 0) /* PDMA on MT7628 */ #define TX_DMA_DONE BIT(31) @@ -447,18 +474,14 @@ /* ethernet reset control register */ #define ETHSYS_RSTCTRL 0x34 #define RSTCTRL_FE BIT(6) -#define RSTCTRL_PPE BIT(31) -#define RSTCTRL_PPE1 BIT(30) +#define RSTCTRL_PPE0 BIT(31) +#define RSTCTRL_PPE0_V2 BIT(30) +#define RSTCTRL_PPE1 BIT(31) #define RSTCTRL_ETH BIT(23) /* ethernet reset check idle register */ #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 -/* ethernet reset control register */ -#define ETHSYS_RSTCTRL 0x34 -#define RSTCTRL_FE BIT(6) -#define RSTCTRL_PPE BIT(31) - /* ethernet dma channel agent map */ #define ETHSYS_DMA_AG_MAP 0x408 #define ETHSYS_DMA_AG_MAP_PDMA BIT(0) @@ -466,8 +489,10 @@ #define ETHSYS_DMA_AG_MAP_PPE BIT(2) /* SGMII subsystem config registers */ -/* Register to auto-negotiation restart */ +/* BMCR (low 16) BMSR (high 16) */ #define SGMSYS_PCS_CONTROL_1 0x0 +#define SGMII_BMCR GENMASK(15, 0) +#define SGMII_BMSR GENMASK(31, 16) #define SGMII_AN_RESTART BIT(9) #define SGMII_ISOLATE BIT(10) #define SGMII_AN_ENABLE BIT(12) @@ -477,13 +502,18 @@ #define SGMII_PCS_FAULT BIT(23) #define SGMII_AN_EXPANSION_CLR BIT(30) +#define SGMSYS_PCS_ADVERTISE 0x8 +#define SGMII_ADVERTISE GENMASK(15, 0) +#define SGMII_LPA GENMASK(31, 16) + /* Register to programmable link timer, the unit in 2 * 8ns */ #define SGMSYS_PCS_LINK_TIMER 0x18 -#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0)) +#define SGMII_LINK_TIMER_MASK GENMASK(19, 0) +#define 
SGMII_LINK_TIMER_DEFAULT (0x186a0 & SGMII_LINK_TIMER_MASK) /* Register to control remote fault */ #define SGMSYS_SGMII_MODE 0x20 -#define SGMII_IF_MODE_BIT0 BIT(0) +#define SGMII_IF_MODE_SGMII BIT(0) #define SGMII_SPEED_DUPLEX_AN BIT(1) #define SGMII_SPEED_MASK GENMASK(3, 2) #define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0) @@ -932,6 +962,7 @@ struct mtk_reg_map { } pdma; struct { u32 qtx_cfg; /* tx queue configuration */ + u32 qtx_sch; /* tx queue scheduler configuration */ u32 rx_ptr; /* rx base pointer */ u32 rx_cnt_cfg; /* rx max count configuration */ u32 qcrx_ptr; /* rx cpu pointer */ @@ -949,6 +980,7 @@ struct mtk_reg_map { u32 fq_tail; /* fq tail pointer */ u32 fq_count; /* fq free page count */ u32 fq_blen; /* fq free page buffer length */ + u32 tx_sch_rate; /* tx scheduler rate control registers */ } qdma; u32 gdm1_cnt; u32 gdma_to_ppe; @@ -1114,6 +1146,8 @@ struct mtk_eth { int ip_align; + struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS]; + struct mtk_ppe *ppe[2]; struct rhashtable flow_table; @@ -1140,6 +1174,7 @@ struct mtk_mac { __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT]; int hwlro_ip_cnt; unsigned int syscfg0; + struct notifier_block device_notifier; }; /* the struct describing the SoC. these are declared in the soc_xyz.c files */ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 784ecb2dc9fb..269208a841c7 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -175,6 +175,8 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) | FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf); } else { + int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f; + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | @@ -182,7 +184,7 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, entry->ib1 = val; val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) | - FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) | + FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) | FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f); } @@ -397,6 +399,24 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, return 0; } +int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, + unsigned int queue) +{ + u32 *ib2 = mtk_foe_entry_ib2(eth, entry); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + *ib2 &= ~MTK_FOE_IB2_QID_V2; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue); + *ib2 |= MTK_FOE_IB2_PSE_QOS_V2; + } else { + *ib2 &= ~MTK_FOE_IB2_QID; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue); + *ib2 |= MTK_FOE_IB2_PSE_QOS; + } + + return 0; +} + static bool mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry, struct mtk_foe_entry *data) diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index a09c32539bcc..ea64fac1d425 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -68,7 +68,9 @@ enum { #define MTK_FOE_IB2_DSCP GENMASK(31, 24) /* CONFIG_MEDIATEK_NETSYS_V2 */ +#define MTK_FOE_IB2_QID_V2 GENMASK(6, 0) #define MTK_FOE_IB2_PORT_MG_V2 BIT(7) +#define MTK_FOE_IB2_PSE_QOS_V2 BIT(8) #define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9) #define MTK_FOE_IB2_MULTICAST_V2 BIT(13) #define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19) @@ -351,6 +353,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, 
int sid); int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, int wdma_idx, int txq, int bss, int wcid); +int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, + unsigned int queue); int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 28bbd1df3e30..81afd5ee3fbf 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -188,7 +188,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, int *wed_index) { struct mtk_wdma_info info = {}; - int pse_port, dsa_port; + int pse_port, dsa_port, queue; if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue, @@ -212,8 +212,6 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, } dsa_port = mtk_flow_get_dsa_port(&dev); - if (dsa_port >= 0) - mtk_foe_entry_set_dsa(eth, foe, dsa_port); if (dev == eth->netdev[0]) pse_port = 1; @@ -222,6 +220,14 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, else return -EOPNOTSUPP; + if (dsa_port >= 0) { + mtk_foe_entry_set_dsa(eth, foe, dsa_port); + queue = 3 + dsa_port; + } else { + queue = pse_port - 1; + } + mtk_foe_entry_set_queue(eth, foe, queue); + out: mtk_foe_entry_set_pse_port(eth, foe, pse_port); diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index 736839c84130..5c286f2c9418 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -19,110 +19,136 @@ static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs) return container_of(pcs, struct mtk_pcs, pcs); } -/* For SGMII interface mode */ -static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs) +static void mtk_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - unsigned int val; - - /* Setup the link timer and QPHY power up inside SGMIISYS */ - regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, - SGMII_LINK_TIMER_DEFAULT); - - regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); - val |= SGMII_REMOTE_FAULT_DIS; - regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); - - regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); - val |= SGMII_AN_RESTART; - regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); - - regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); - val &= ~SGMII_PHYA_PWD; - regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); + unsigned int bm, adv; - return 0; + /* Read the BMSR and LPA */ + regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm); + regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv); + phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm), + FIELD_GET(SGMII_LPA, adv)); } -/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a - * fixed speed. 
- */ -static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs, - phy_interface_t interface) +static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) { - unsigned int val; + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); + unsigned int rgc3, sgm_mode, bmcr; + int advertise, link_timer; + bool changed, use_an; - regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val); - val &= ~RG_PHY_SPEED_MASK; if (interface == PHY_INTERFACE_MODE_2500BASEX) - val |= RG_PHY_SPEED_3_125G; - regmap_write(mpcs->regmap, mpcs->ana_rgc3, val); + rgc3 = RG_PHY_SPEED_3_125G; + else + rgc3 = 0; + + advertise = phylink_mii_c22_pcs_encode_advertisement(interface, + advertising); + if (advertise < 0) + return advertise; + + link_timer = phylink_get_link_timer_ns(interface); + if (link_timer < 0) + return link_timer; + + /* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and + * we assume that fixes it's speed at bitrate = line rate (in + * other words, 1000Mbps or 2500Mbps). + */ + if (interface == PHY_INTERFACE_MODE_SGMII) { + sgm_mode = SGMII_IF_MODE_SGMII; + if (phylink_autoneg_inband(mode)) { + sgm_mode |= SGMII_REMOTE_FAULT_DIS | + SGMII_SPEED_DUPLEX_AN; + use_an = true; + } else { + use_an = false; + } + } else if (phylink_autoneg_inband(mode)) { + /* 1000base-X or 2500base-X autoneg */ + sgm_mode = SGMII_REMOTE_FAULT_DIS; + use_an = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + advertising); + } else { + /* 1000base-X or 2500base-X without autoneg */ + sgm_mode = 0; + use_an = false; + } - /* Disable SGMII AN */ - regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); - val &= ~SGMII_AN_ENABLE; - regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); + if (use_an) { + /* FIXME: Do we need to set AN_RESTART here? 
*/ + bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE; + } else { + bmcr = 0; + } - /* Set the speed etc but leave the duplex unchanged */ - regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); - val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK; - val |= SGMII_SPEED_1000; - regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); + /* Configure the underlying interface speed */ + regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3, + RG_PHY_SPEED_3_125G, rgc3); - /* Release PHYA power down state */ - regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); - val &= ~SGMII_PHYA_PWD; - regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); + /* Update the advertisement, noting whether it has changed */ + regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE, + SGMII_ADVERTISE, advertise, &changed); - return 0; -} + /* Setup the link timer and QPHY power up inside SGMIISYS */ + regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8); -static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, - phy_interface_t interface, - const unsigned long *advertising, - bool permit_pause_to_mac) -{ - struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); - int err = 0; + /* Update the sgmsys mode register */ + regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE, + SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN | + SGMII_IF_MODE_SGMII, sgm_mode); + + /* Update the BMCR */ + regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1, + SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr); - /* Setup SGMIISYS with the determined property */ - if (interface != PHY_INTERFACE_MODE_SGMII) - err = mtk_pcs_setup_mode_force(mpcs, interface); - else if (phylink_autoneg_inband(mode)) - err = mtk_pcs_setup_mode_an(mpcs); + /* Release PHYA power down state */ + regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, + SGMII_PHYA_PWD, 0); - return err; + return changed; } static void mtk_pcs_restart_an(struct phylink_pcs *pcs) { struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); - unsigned int val; - regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); - val |= SGMII_AN_RESTART; - regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); + regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1, + SGMII_AN_RESTART, SGMII_AN_RESTART); } static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex) { struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); - unsigned int val; - - if (!phy_interface_mode_is_8023z(interface)) - return; - - /* SGMII force duplex setting */ - regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); - val &= ~SGMII_DUPLEX_FULL; - if (duplex == DUPLEX_FULL) - val |= SGMII_DUPLEX_FULL; - - regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); + unsigned int sgm_mode; + + if (!phylink_autoneg_inband(mode)) { + /* Force the speed and duplex setting */ + if (speed == SPEED_10) + sgm_mode = SGMII_SPEED_10; + else if (speed == SPEED_100) + sgm_mode = SGMII_SPEED_100; + else + sgm_mode = SGMII_SPEED_1000; + + if (duplex == DUPLEX_FULL) + sgm_mode |= SGMII_DUPLEX_FULL; + + regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE, + SGMII_DUPLEX_FULL | SGMII_SPEED_MASK, + sgm_mode); + } } static const struct phylink_pcs_ops mtk_pcs_ops = { + .pcs_get_state = mtk_pcs_get_state, .pcs_config = mtk_pcs_config, .pcs_an_restart = mtk_pcs_restart_an, .pcs_link_up = mtk_pcs_link_up, diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 65e01bf4b4d2..a6271449617f 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ 
b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -9,6 +9,7 @@ #include <linux/skbuff.h> #include <linux/of_platform.h> #include <linux/of_address.h> +#include <linux/of_reserved_mem.h> #include <linux/mfd/syscon.h> #include <linux/debugfs.h> #include <linux/soc/mediatek/mtk_wed.h> @@ -16,12 +17,14 @@ #include "mtk_wed_regs.h" #include "mtk_wed.h" #include "mtk_ppe.h" +#include "mtk_wed_wo.h" #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) #define MTK_WED_PKT_SIZE 1900 #define MTK_WED_BUF_SIZE 2048 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) +#define MTK_WED_RX_RING_SIZE 1536 #define MTK_WED_TX_RING_SIZE 2048 #define MTK_WED_WDMA_RING_SIZE 1024 @@ -30,6 +33,10 @@ #define MTK_WED_PER_GROUP_PKT 128 #define MTK_WED_FBUF_SIZE 128 +#define MTK_WED_MIOD_CNT 16 +#define MTK_WED_FB_CMD_CNT 1024 +#define MTK_WED_RRO_QUE_CNT 8192 +#define MTK_WED_MIOD_ENTRY_CNT 128 static struct mtk_wed_hw *hw_list[2]; static DEFINE_MUTEX(hw_lock); @@ -64,12 +71,81 @@ wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) wdma_m32(dev, reg, 0, mask); } +static void +wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + wdma_m32(dev, reg, mask, 0); +} + +static u32 +wifi_r32(struct mtk_wed_device *dev, u32 reg) +{ + return readl(dev->wlan.base + reg); +} + +static void +wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + writel(val, dev->wlan.base + reg); +} + static u32 mtk_wed_read_reset(struct mtk_wed_device *dev) { return wed_r32(dev, MTK_WED_RESET); } +static u32 +mtk_wdma_read_reset(struct mtk_wed_device *dev) +{ + return wdma_r32(dev, MTK_WDMA_GLO_CFG); +} + +static int +mtk_wdma_rx_reset(struct mtk_wed_device *dev) +{ + u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; + int i, ret; + + wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); + ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, + !(status & mask), 0, 10000); + if (ret) + dev_err(dev->hw->dev, "rx reset failed\n"); + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { + if (dev->rx_wdma[i].desc) + continue; + + wdma_w32(dev, + MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); + } + + return ret; +} + +static void +mtk_wdma_tx_reset(struct mtk_wed_device *dev) +{ + u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; + int i; + + wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); + if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, + !(status & mask), 0, 10000)) + dev_err(dev->hw->dev, "tx reset failed\n"); + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + wdma_w32(dev, + MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); +} + static void mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) { @@ -81,6 +157,55 @@ mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) WARN_ON_ONCE(1); } +static u32 +mtk_wed_wo_read_status(struct mtk_wed_device *dev) +{ + return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); +} + +static void +mtk_wed_wo_reset(struct mtk_wed_device *dev) +{ + struct mtk_wed_wo *wo = dev->hw->wed_wo; + u8 state = MTK_WED_WO_STATE_DISABLE; + void __iomem *reg; + u32 val; + + mtk_wdma_tx_reset(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); + + if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, + MTK_WED_WO_CMD_CHANGE_STATE, &state, + sizeof(state), false)) + return; + + if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, + val == MTK_WED_WOIF_DISABLE_DONE, + 100, 
MTK_WOCPU_TIMEOUT)) + dev_err(dev->hw->dev, "failed to disable wed-wo\n"); + + reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); + + val = readl(reg); + switch (dev->hw->index) { + case 0: + val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; + writel(val, reg); + val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; + writel(val, reg); + break; + case 1: + val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; + writel(val, reg); + val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; + writel(val, reg); + break; + default: + break; + } + iounmap(reg); +} + static struct mtk_wed_hw * mtk_wed_assign(struct mtk_wed_device *dev) { @@ -115,7 +240,7 @@ out: } static int -mtk_wed_buffer_alloc(struct mtk_wed_device *dev) +mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) { struct mtk_wdma_desc *desc; dma_addr_t desc_phys; @@ -132,16 +257,16 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev) if (!page_list) return -ENOMEM; - dev->buf_ring.size = ring_size; - dev->buf_ring.pages = page_list; + dev->tx_buf_ring.size = ring_size; + dev->tx_buf_ring.pages = page_list; desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), &desc_phys, GFP_KERNEL); if (!desc) return -ENOMEM; - dev->buf_ring.desc = desc; - dev->buf_ring.desc_phys = desc_phys; + dev->tx_buf_ring.desc = desc; + dev->tx_buf_ring.desc_phys = desc_phys; for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { dma_addr_t page_phys, buf_phys; @@ -202,10 +327,10 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev) } static void -mtk_wed_free_buffer(struct mtk_wed_device *dev) +mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) { - struct mtk_wdma_desc *desc = dev->buf_ring.desc; - void **page_list = dev->buf_ring.pages; + struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; + void **page_list = dev->tx_buf_ring.pages; int page_idx; int i; @@ -215,7 +340,8 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev) if (!desc) goto free_pagelist; - for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { + for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; + i += MTK_WED_BUF_PER_PAGE) { void *page = page_list[page_idx++]; dma_addr_t buf_addr; @@ -228,13 +354,59 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev) __free_page(page); } - dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), - desc, dev->buf_ring.desc_phys); + dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), + desc, dev->tx_buf_ring.desc_phys); free_pagelist: kfree(page_list); } +static int +mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) +{ + struct mtk_rxbm_desc *desc; + dma_addr_t desc_phys; + + dev->rx_buf_ring.size = dev->wlan.rx_nbuf; + desc = dma_alloc_coherent(dev->hw->dev, + dev->wlan.rx_nbuf * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev->rx_buf_ring.desc = desc; + dev->rx_buf_ring.desc_phys = desc_phys; + dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); + + return 0; +} + +static void +mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) +{ + struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; + + if (!desc) + return; + + dev->wlan.release_rx_buf(dev); + dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), + desc, dev->rx_buf_ring.desc_phys); +} + +static void +mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) +{ + wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, + FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); + wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); + wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | + 
FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); + wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, + FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); +} + static void mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) { @@ -246,14 +418,21 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) } static void +mtk_wed_free_rx_rings(struct mtk_wed_device *dev) +{ + mtk_wed_free_rx_buffer(dev); + mtk_wed_free_ring(dev, &dev->rro.ring); +} + +static void mtk_wed_free_tx_rings(struct mtk_wed_device *dev) { int i; for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) mtk_wed_free_ring(dev, &dev->tx_ring[i]); - for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) - mtk_wed_free_ring(dev, &dev->tx_wdma[i]); + for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) + mtk_wed_free_ring(dev, &dev->rx_wdma[i]); } static void @@ -290,6 +469,38 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) } } +#define MTK_WFMDA_RX_DMA_EN BIT(2) +static void +mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) +{ + u32 val; + int i; + + if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) + return; /* queue is not configured by mt76 */ + + for (i = 0; i < 3; i++) { + u32 cur_idx; + + cur_idx = wed_r32(dev, + MTK_WED_WPDMA_RING_RX_DATA(idx) + + MTK_WED_RING_OFS_CPU_IDX); + if (cur_idx == MTK_WED_RX_RING_SIZE - 1) + break; + + usleep_range(100000, 200000); + } + + if (i == 3) { + dev_err(dev->hw->dev, "rx dma enable failed\n"); + return; + } + + val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | + MTK_WFMDA_RX_DMA_EN; + wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); +} + static void mtk_wed_dma_disable(struct mtk_wed_device *dev) { @@ -303,59 +514,88 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev) MTK_WED_GLO_CFG_TX_DMA_EN | MTK_WED_GLO_CFG_RX_DMA_EN); - wdma_m32(dev, MTK_WDMA_GLO_CFG, + wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN | MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0); + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); if (dev->hw->version == 1) { regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); - wdma_m32(dev, MTK_WDMA_GLO_CFG, - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); + wdma_clr(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); } else { wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); - mtk_wed_set_512_support(dev, false); + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RX_DRV_EN); + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); } + + mtk_wed_set_512_support(dev, false); } static void mtk_wed_stop(struct mtk_wed_device *dev) { - mtk_wed_dma_disable(dev); mtk_wed_set_ext_int(dev, false); - wed_clr(dev, MTK_WED_CTRL, - MTK_WED_CTRL_WDMA_INT_AGENT_EN | - MTK_WED_CTRL_WPDMA_INT_AGENT_EN | - MTK_WED_CTRL_WED_TX_BM_EN | - MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); wdma_w32(dev, MTK_WDMA_INT_MASK, 0); wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); + + if (dev->hw->version == 1) + return; + + wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); + wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); } static void -mtk_wed_detach(struct mtk_wed_device *dev) +mtk_wed_deinit(struct mtk_wed_device *dev) { - struct mtk_wed_hw *hw = dev->hw; + mtk_wed_stop(dev); + mtk_wed_dma_disable(dev); - mutex_lock(&hw_lock); + wed_clr(dev, 
MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_EN | + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - mtk_wed_stop(dev); + if (dev->hw->version == 1) + return; - wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); - wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + wed_clr(dev, MTK_WED_CTRL, + MTK_WED_CTRL_RX_ROUTE_QM_EN | + MTK_WED_CTRL_WED_RX_BM_EN | + MTK_WED_CTRL_RX_RRO_QM_EN); +} - mtk_wed_reset(dev, MTK_WED_RESET_WED); +static void +__mtk_wed_detach(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw = dev->hw; + + mtk_wed_deinit(dev); - mtk_wed_free_buffer(dev); + mtk_wdma_rx_reset(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); + mtk_wed_free_tx_buffer(dev); mtk_wed_free_tx_rings(dev); + if (mtk_wed_get_rx_capa(dev)) { + if (hw->wed_wo) + mtk_wed_wo_reset(dev); + mtk_wed_free_rx_rings(dev); + if (hw->wed_wo) + mtk_wed_wo_deinit(hw); + } + if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { struct device_node *wlan_node; @@ -373,6 +613,13 @@ mtk_wed_detach(struct mtk_wed_device *dev) module_put(THIS_MODULE); hw->wed_dev = NULL; +} + +static void +mtk_wed_detach(struct mtk_wed_device *dev) +{ + mutex_lock(&hw_lock); + __mtk_wed_detach(dev); mutex_unlock(&hw_lock); } @@ -431,10 +678,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev) } else { mtk_wed_bus_init(dev); - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); - wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); - wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); + wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); + wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); + wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); } } @@ -443,7 +692,7 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev) { u32 mask, set; - mtk_wed_stop(dev); + mtk_wed_deinit(dev); mtk_wed_reset(dev, MTK_WED_RESET_WED); mtk_wed_set_wpdma(dev); @@ -484,6 +733,132 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev) } } +static int +mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, + int size) +{ + ring->desc = dma_alloc_coherent(dev->hw->dev, + size * sizeof(*ring->desc), + &ring->desc_phys, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_size = sizeof(*ring->desc); + ring->size = size; + memset(ring->desc, 0, size); + + return 0; +} + +#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) +static int +mtk_wed_rro_alloc(struct mtk_wed_device *dev) +{ + struct reserved_mem *rmem; + struct device_node *np; + int index; + + index = of_property_match_string(dev->hw->node, "memory-region-names", + "wo-dlm"); + if (index < 0) + return index; + + np = of_parse_phandle(dev->hw->node, "memory-region", index); + if (!np) + return -ENODEV; + + rmem = of_reserved_mem_lookup(np); + of_node_put(np); + + if (!rmem) + return -ENODEV; + + dev->rro.miod_phys = rmem->base; + dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; + + return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, + MTK_WED_RRO_QUE_CNT); +} + +static int +mtk_wed_rro_cfg(struct mtk_wed_device *dev) +{ + struct mtk_wed_wo *wo = dev->hw->wed_wo; + struct { + struct { + __le32 base; + __le32 cnt; + __le32 unit; + } ring[2]; + __le32 wed; + u8 version; + } req = { + .ring[0] = { + .base = 
cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), + .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), + .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), + }, + .ring[1] = { + .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + + MTK_WED_MIOD_COUNT), + .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT), + .unit = cpu_to_le32(4), + }, + }; + + return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, + MTK_WED_WO_CMD_WED_CFG, + &req, sizeof(req), true); +} + +static void +mtk_wed_rro_hw_init(struct mtk_wed_device *dev) +{ + wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, + FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | + FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | + FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, + MTK_WED_MIOD_ENTRY_CNT >> 2)); + + wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); + wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, + FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); + wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); + wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, + FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); + wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); + wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); + + wed_set(dev, MTK_WED_RROQM_RST_IDX, + MTK_WED_RROQM_RST_IDX_MIOD | + MTK_WED_RROQM_RST_IDX_FDBK); + + wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); + wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); +} + +static void +mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) +{ + wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); + + for (;;) { + usleep_range(100, 200); + if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) + break; + } + + /* configure RX_ROUTE_QM */ + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); + wed_set(dev, MTK_WED_RTQM_GLO_CFG, + FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); + wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); + /* enable RX_ROUTE_QM */ + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); +} + static void mtk_wed_hw_init(struct mtk_wed_device *dev) { @@ -495,11 +870,11 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE | FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, - dev->buf_ring.size / 128) | + dev->tx_buf_ring.size / 128) | FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, MTK_WED_TX_RING_SIZE / 256)); - wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); + wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); @@ -526,9 +901,9 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE | FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, - dev->buf_ring.size / 128) | + dev->tx_buf_ring.size / 128) | FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, - dev->buf_ring.size / 128)); + dev->tx_buf_ring.size / 128)); wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | MTK_WED_TX_TKID_DYN_THR_HI); @@ -536,18 +911,28 @@ mtk_wed_hw_init(struct mtk_wed_device *dev) mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); - if (dev->hw->version == 1) + if (dev->hw->version == 1) { wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); - else + } else { wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); + /* rx hw init */ + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, + MTK_WED_WPDMA_RX_D_RST_CRX_IDX | + MTK_WED_WPDMA_RX_D_RST_DRV_IDX); + wed_w32(dev, 
MTK_WED_WPDMA_RX_D_RST_IDX, 0); + + mtk_wed_rx_buffer_hw_init(dev); + mtk_wed_rro_hw_init(dev); + mtk_wed_route_qm_hw_init(dev); + } wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); } static void -mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size) +mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) { void *head = (void *)ring->desc; int i; @@ -557,49 +942,140 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size) desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); desc->buf0 = 0; - desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + if (tx) + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + else + desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); desc->buf1 = 0; desc->info = 0; } } static u32 -mtk_wed_check_busy(struct mtk_wed_device *dev) +mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) { - if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) - return true; - - if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & - MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) - return true; - - if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) - return true; - - if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & - MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) - return true; - - if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & - MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) - return true; - - if (wed_r32(dev, MTK_WED_CTRL) & - (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) - return true; - - return false; + return !!(wed_r32(dev, reg) & mask); } static int -mtk_wed_poll_busy(struct mtk_wed_device *dev) +mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) { int sleep = 15000; int timeout = 100 * sleep; u32 val; return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, - timeout, false, dev); + timeout, false, dev, reg, mask); +} + +static int +mtk_wed_rx_reset(struct mtk_wed_device *dev) +{ + struct mtk_wed_wo *wo = dev->hw->wed_wo; + u8 val = MTK_WED_WO_STATE_SER_RESET; + int i, ret; + + ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, + MTK_WED_WO_CMD_CHANGE_STATE, &val, + sizeof(val), true); + if (ret) + return ret; + + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); + ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); + if (ret) { + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); + } else { + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, + MTK_WED_WPDMA_RX_D_RST_CRX_IDX | + MTK_WED_WPDMA_RX_D_RST_DRV_IDX); + + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | + MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | + MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); + + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); + } + + /* reset rro qm */ + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); + ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_RX_RRO_QM_BUSY); + if (ret) { + mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM); + } else { + wed_set(dev, MTK_WED_RROQM_RST_IDX, + MTK_WED_RROQM_RST_IDX_MIOD | + MTK_WED_RROQM_RST_IDX_FDBK); + wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); + } + + /* reset route qm */ + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); + ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_RX_ROUTE_QM_BUSY); + if (ret) + mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); + else + wed_set(dev, MTK_WED_RTQM_GLO_CFG, + MTK_WED_RTQM_Q_RST); + + /* reset tx wdma */ + mtk_wdma_tx_reset(dev); + + 
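/* [annotation, not part of the patch]
 * The reset paths above all follow one pattern: disable a block, poll its
 * BUSY bit via mtk_wed_poll_busy() (a read_poll_timeout() wrapper, ~15 ms
 * poll interval, ~1.5 s total), and only issue the hard MTK_WED_RESET_*
 * pulse when the poll times out. A minimal, self-contained sketch of that
 * poll-then-reset pattern follows; struct wed_sketch and the sketch_*
 * helpers are hypothetical stand-ins used only to illustrate the
 * read_poll_timeout() usage, they are not driver API.
 */
#include <linux/io.h>
#include <linux/iopoll.h>

struct wed_sketch { void __iomem *base; };

static u32 sketch_busy(struct wed_sketch *s, u32 reg, u32 mask)
{
	return !!(readl(s->base + reg) & mask);
}

/* Disable a block and wait for its BUSY flag; returns -ETIMEDOUT on timeout. */
static int sketch_disable_block(struct wed_sketch *s, u32 en_reg, u32 en_mask,
				u32 busy_reg, u32 busy_mask)
{
	u32 busy;

	writel(readl(s->base + en_reg) & ~en_mask, s->base + en_reg);
	return read_poll_timeout(sketch_busy, busy, !busy, 15000, 100 * 15000,
				 false, s, busy_reg, busy_mask);
}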
/* reset tx wdma drv */ + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); + + /* reset wed rx dma */ + ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_RX_DMA_BUSY); + wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN); + if (ret) { + mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); + } else { + struct mtk_eth *eth = dev->hw->eth; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + wed_set(dev, MTK_WED_RESET_IDX, + MTK_WED_RESET_IDX_RX_V2); + else + wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX); + wed_w32(dev, MTK_WED_RESET_IDX, 0); + } + + /* reset rx bm */ + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); + mtk_wed_poll_busy(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_RX_BM_BUSY); + mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); + + /* wo change to enable state */ + val = MTK_WED_WO_STATE_ENABLE; + ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, + MTK_WED_WO_CMD_CHANGE_STATE, &val, + sizeof(val), true); + if (ret) + return ret; + + /* wed_rx_ring_reset */ + for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) { + if (!dev->rx_ring[i].desc) + continue; + + mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE, + false); + } + mtk_wed_free_rx_buffer(dev); + + return 0; } static void @@ -613,23 +1089,27 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) if (!dev->tx_ring[i].desc) continue; - mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE); + mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, + true); } - if (mtk_wed_poll_busy(dev)) - busy = mtk_wed_check_busy(dev); - + /* 1. reset WED tx DMA */ + wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN); + busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_TX_DMA_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); } else { - wed_w32(dev, MTK_WED_RESET_IDX, - MTK_WED_RESET_IDX_TX | - MTK_WED_RESET_IDX_RX); + wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX); wed_w32(dev, MTK_WED_RESET_IDX, 0); } - wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); - wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + /* 2. reset WDMA rx DMA */ + busy = !!mtk_wdma_rx_reset(dev); + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + if (!busy) + busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); @@ -646,6 +1126,9 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); } + /* 3. reset WED WPDMA tx */ + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + for (i = 0; i < 100; i++) { val = wed_r32(dev, MTK_WED_TX_BM_INTF); if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) @@ -653,8 +1136,19 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) } mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN); mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + /* 4. 
reset WED WPDMA tx */ + busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + if (!busy) + busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY); + if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); @@ -666,11 +1160,21 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev) wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); } + dev->init_done = false; + if (dev->hw->version == 1) + return; + + if (!busy) { + wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX); + wed_w32(dev, MTK_WED_RESET_IDX, 0); + } + + mtk_wed_rx_reset(dev); } static int mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, - int size, u32 desc_size) + int size, u32 desc_size, bool tx) { ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, &ring->desc_phys, GFP_KERNEL); @@ -679,18 +1183,24 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, ring->desc_size = desc_size; ring->size = size; - mtk_wed_ring_reset(ring, size); + mtk_wed_ring_reset(ring, size, tx); return 0; } static int -mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) +mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, + bool reset) { u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; - struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; + struct mtk_wed_ring *wdma; + + if (idx >= ARRAY_SIZE(dev->rx_wdma)) + return -EINVAL; - if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size)) + wdma = &dev->rx_wdma[idx]; + if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, + desc_size, true)) return -ENOMEM; wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, @@ -707,6 +1217,64 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) return 0; } +static int +mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, + bool reset) +{ + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; + struct mtk_wed_ring *wdma; + + if (idx >= ARRAY_SIZE(dev->tx_wdma)) + return -EINVAL; + + wdma = &dev->tx_wdma[idx]; + if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, + desc_size, true)) + return -ENOMEM; + + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, + size); + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); + + if (reset) + mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true); + + if (!idx) { + wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, + size); + wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, + 0); + wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, + 0); + } + + return 0; +} + +static void +mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, + u32 reason, u32 hash) +{ + struct mtk_eth *eth = dev->hw->eth; + struct ethhdr *eh; + + if (!skb) + return; + + if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) + return; + + skb_set_mac_header(skb, 0); + eh = eth_hdr(skb); + skb->protocol = eh->h_proto; + mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); +} + static void mtk_wed_configure_irq(struct 
mtk_wed_device *dev, u32 irq_mask) { @@ -729,6 +1297,8 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); } else { + wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, + GENMASK(1, 0)); /* initail tx interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | @@ -747,6 +1317,16 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, dev->wlan.txfree_tbit)); + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, + MTK_WED_WPDMA_INT_CTRL_RX0_EN | + MTK_WED_WPDMA_INT_CTRL_RX0_CLR | + MTK_WED_WPDMA_INT_CTRL_RX1_EN | + MTK_WED_WPDMA_INT_CTRL_RX1_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, + dev->wlan.rx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, + dev->wlan.rx_tbit[1])); + wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); wed_set(dev, MTK_WED_WDMA_INT_CTRL, FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, @@ -784,9 +1364,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev) wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); } else { + int i; + wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED); + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); @@ -794,6 +1380,15 @@ mtk_wed_dma_enable(struct mtk_wed_device *dev) wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, + MTK_WED_WPDMA_RX_D_RX_DRV_EN | + FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | + FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, + 0x2)); + + for (i = 0; i < MTK_WED_RX_QUEUES; i++) + mtk_wed_check_wfdma_rx_fill(dev, i); } } @@ -802,9 +1397,12 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) { int i; - for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) - if (!dev->tx_wdma[i].desc) - mtk_wed_wdma_ring_setup(dev, i, 16); + if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev)) + return; + + for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) + if (!dev->rx_wdma[i].desc) + mtk_wed_wdma_rx_ring_setup(dev, i, 16, false); mtk_wed_hw_init(dev); mtk_wed_configure_irq(dev, irq_mask); @@ -819,9 +1417,22 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) val |= BIT(0) | (BIT(1) * !!dev->hw->index); regmap_write(dev->hw->mirror, dev->hw->index * 4, val); } else { - mtk_wed_set_512_support(dev, true); + /* driver set mid ready and only once */ + wed_w32(dev, MTK_WED_EXT_INT_MASK1, + MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); + wed_w32(dev, MTK_WED_EXT_INT_MASK2, + MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); + + wed_r32(dev, MTK_WED_EXT_INT_MASK1); + wed_r32(dev, MTK_WED_EXT_INT_MASK2); + + if (mtk_wed_rro_cfg(dev)) + return; + } + mtk_wed_set_512_support(dev, dev->wlan.wcid_512); + mtk_wed_dma_enable(dev); dev->running = true; } @@ -853,7 +1464,7 @@ mtk_wed_attach(struct mtk_wed_device *dev) if (!hw) { module_put(THIS_MODULE); ret = -ENODEV; - goto out; + goto unlock; } device = dev->wlan.bus_type == MTK_WED_BUS_PCIE @@ -866,30 +1477,44 @@ mtk_wed_attach(struct mtk_wed_device *dev) dev->dev = hw->dev; dev->irq = hw->irq; dev->wdma_idx = hw->index; + dev->version = hw->version; if (hw->eth->dma_dev == hw->eth->dev && of_dma_is_coherent(hw->eth->dev->of_node)) mtk_eth_set_dma_device(hw->eth, hw->dev); - ret = mtk_wed_buffer_alloc(dev); - if 
(ret) { - mtk_wed_detach(dev); + ret = mtk_wed_tx_buffer_alloc(dev); + if (ret) goto out; + + if (mtk_wed_get_rx_capa(dev)) { + ret = mtk_wed_rro_alloc(dev); + if (ret) + goto out; } mtk_wed_hw_init_early(dev); - if (hw->hifsys) + if (hw->version == 1) { regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); - + } else { + dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); + ret = mtk_wed_wo_init(hw); + } out: + if (ret) { + dev_err(dev->hw->dev, "failed to attach wed device\n"); + __mtk_wed_detach(dev); + } +unlock: mutex_unlock(&hw_lock); return ret; } static int -mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) +mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, + bool reset) { struct mtk_wed_ring *ring = &dev->tx_ring[idx]; @@ -905,13 +1530,15 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) * WDMA RX. */ - BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); + if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) + return -EINVAL; - if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, - sizeof(*ring->desc))) + if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, + sizeof(*ring->desc), true)) return -ENOMEM; - if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) + if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, + reset)) return -ENOMEM; ring->reg_base = MTK_WED_RING_TX(idx); @@ -955,6 +1582,39 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) return 0; } +static int +mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, + bool reset) +{ + struct mtk_wed_ring *ring = &dev->rx_ring[idx]; + + if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) + return -EINVAL; + + if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, + sizeof(*ring->desc), false)) + return -ENOMEM; + + if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, + reset)) + return -ENOMEM; + + ring->reg_base = MTK_WED_RING_RX_DATA(idx); + ring->wpdma = regs; + ring->flags |= MTK_WED_RING_CONFIGURED; + + /* WPDMA -> WED */ + wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); + wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); + + wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, + ring->desc_phys); + wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, + MTK_WED_RX_RING_SIZE); + + return 0; +} + static u32 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) { @@ -1051,7 +1711,9 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, static const struct mtk_wed_ops wed_ops = { .attach = mtk_wed_attach, .tx_ring_setup = mtk_wed_tx_ring_setup, + .rx_ring_setup = mtk_wed_rx_ring_setup, .txfree_ring_setup = mtk_wed_txfree_ring_setup, + .msg_update = mtk_wed_mcu_msg_update, .start = mtk_wed_start, .stop = mtk_wed_stop, .reset_dma = mtk_wed_reset_dma, @@ -1060,6 +1722,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, .irq_get = mtk_wed_irq_get, .irq_set_mask = mtk_wed_irq_set_mask, .detach = mtk_wed_detach, + .ppe_check = mtk_wed_ppe_check, }; struct device_node *eth_np = eth->dev->of_node; struct platform_device *pdev; diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h index ae420ca01a48..e012b8a82133 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.h +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -10,6 +10,7 @@ #include <linux/netdevice.h> struct mtk_eth; +struct mtk_wed_wo; struct mtk_wed_hw { 
struct device_node *node; @@ -22,6 +23,7 @@ struct mtk_wed_hw { struct regmap *mirror; struct dentry *debugfs_dir; struct mtk_wed_device *wed_dev; + struct mtk_wed_wo *wed_wo; u32 debugfs_reg; u32 num_flows; u8 version; @@ -85,6 +87,24 @@ wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) } static inline u32 +wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg) +{ + if (!dev->rx_ring[ring].wpdma) + return 0; + + return readl(dev->rx_ring[ring].wpdma + reg); +} + +static inline void +wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) +{ + if (!dev->rx_ring[ring].wpdma) + return; + + writel(val, dev->rx_ring[ring].wpdma + reg); +} + +static inline u32 wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) { if (!dev->txfree_ring.wpdma) @@ -126,6 +146,7 @@ static inline int mtk_wed_flow_add(int index) static inline void mtk_wed_flow_remove(int index) { } + #endif #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c index f420f187e837..56f663439721 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ #include <linux/seq_file.h> +#include <linux/soc/mediatek/mtk_wed.h> #include "mtk_wed.h" #include "mtk_wed_regs.h" @@ -18,6 +19,8 @@ enum { DUMP_TYPE_WDMA, DUMP_TYPE_WPDMA_TX, DUMP_TYPE_WPDMA_TXFREE, + DUMP_TYPE_WPDMA_RX, + DUMP_TYPE_WED_RRO, }; #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } @@ -36,6 +39,9 @@ enum { #define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) #define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) +#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n) +#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO) +#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO) static void print_reg_val(struct seq_file *s, const char *name, u32 val) @@ -57,6 +63,7 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, cur > regs ? 
"\n" : "", cur->name); continue; + case DUMP_TYPE_WED_RRO: case DUMP_TYPE_WED: val = wed_r32(dev, cur->offset); break; @@ -69,6 +76,9 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, case DUMP_TYPE_WPDMA_TXFREE: val = wpdma_txfree_r32(dev, cur->offset); break; + case DUMP_TYPE_WPDMA_RX: + val = wpdma_rx_r32(dev, cur->base, cur->offset); + break; } print_reg_val(s, cur->name, val); } @@ -132,6 +142,80 @@ wed_txinfo_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(wed_txinfo); +static int +wed_rxinfo_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WPDMA RX"), + DUMP_WPDMA_RX_RING(0), + DUMP_WPDMA_RX_RING(1), + + DUMP_STR("WPDMA RX"), + DUMP_WED(WED_WPDMA_RX_D_MIB(0)), + DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)), + DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)), + DUMP_WED(WED_WPDMA_RX_D_MIB(1)), + DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)), + DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)), + DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB), + + DUMP_STR("WED RX"), + DUMP_WED_RING(WED_RING_RX_DATA(0)), + DUMP_WED_RING(WED_RING_RX_DATA(1)), + + DUMP_STR("WED RRO"), + DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0), + DUMP_WED(WED_RROQM_MID_MIB), + DUMP_WED(WED_RROQM_MOD_MIB), + DUMP_WED(WED_RROQM_MOD_COHERENT_MIB), + DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0), + DUMP_WED(WED_RROQM_FDBK_IND_MIB), + DUMP_WED(WED_RROQM_FDBK_ENQ_MIB), + DUMP_WED(WED_RROQM_FDBK_ANC_MIB), + DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB), + + DUMP_STR("WED Route QM"), + DUMP_WED(WED_RTQM_R2H_MIB(0)), + DUMP_WED(WED_RTQM_R2Q_MIB(0)), + DUMP_WED(WED_RTQM_Q2H_MIB(0)), + DUMP_WED(WED_RTQM_R2H_MIB(1)), + DUMP_WED(WED_RTQM_R2Q_MIB(1)), + DUMP_WED(WED_RTQM_Q2H_MIB(1)), + DUMP_WED(WED_RTQM_Q2N_MIB), + DUMP_WED(WED_RTQM_Q2B_MIB), + DUMP_WED(WED_RTQM_PFDBK_MIB), + + DUMP_STR("WED WDMA TX"), + DUMP_WED(WED_WDMA_TX_MIB), + DUMP_WED_RING(WED_WDMA_RING_TX), + + DUMP_STR("WDMA TX"), + DUMP_WDMA(WDMA_GLO_CFG), + DUMP_WDMA_RING(WDMA_RING_TX(0)), + DUMP_WDMA_RING(WDMA_RING_TX(1)), + + DUMP_STR("WED RX BM"), + DUMP_WED(WED_RX_BM_BASE), + DUMP_WED(WED_RX_BM_RX_DMAD), + DUMP_WED(WED_RX_BM_PTR), + DUMP_WED(WED_RX_BM_TKID_MIB), + DUMP_WED(WED_RX_BM_BLEN), + DUMP_WED(WED_RX_BM_STS), + DUMP_WED(WED_RX_BM_INTF2), + DUMP_WED(WED_RX_BM_INTF), + DUMP_WED(WED_RX_BM_ERR_STS), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (!dev) + return 0; + + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_rxinfo); static int mtk_wed_reg_set(void *data, u64 val) @@ -175,4 +259,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); + if (hw->version != 1) + debugfs_create_file_unsafe("rxinfo", 0400, dir, hw, + &wed_rxinfo_fops); } diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c new file mode 100644 index 000000000000..6bad0d262f28 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2022 MediaTek Inc. 
+ * + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + * Sujuan Chen <sujuan.chen@mediatek.com> + */ + +#include <linux/firmware.h> +#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> +#include <linux/mfd/syscon.h> +#include <linux/soc/mediatek/mtk_wed.h> +#include <asm/unaligned.h> + +#include "mtk_wed_regs.h" +#include "mtk_wed_wo.h" +#include "mtk_wed.h" + +static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg) +{ + return readl(wo->boot.addr + reg); +} + +static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) +{ + writel(val, wo->boot.addr + reg); +} + +static struct sk_buff * +mtk_wed_mcu_msg_alloc(const void *data, int data_len) +{ + int length = sizeof(struct mtk_wed_mcu_hdr) + data_len; + struct sk_buff *skb; + + skb = alloc_skb(length, GFP_KERNEL); + if (!skb) + return NULL; + + memset(skb->head, 0, length); + skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr)); + if (data && data_len) + skb_put_data(skb, data, data_len); + + return skb; +} + +static struct sk_buff * +mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires) +{ + if (!time_is_after_jiffies(expires)) + return NULL; + + wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q), + expires - jiffies); + return skb_dequeue(&wo->mcu.res_q); +} + +void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb) +{ + skb_queue_tail(&wo->mcu.res_q, skb); + wake_up(&wo->mcu.wait); +} + +static void +mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb) +{ + u32 count = get_unaligned_le32(skb->data); + struct mtk_wed_wo_rx_stats *stats; + int i; + + if (count * sizeof(*stats) > skb->len - sizeof(u32)) + return; + + stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32)); + for (i = 0 ; i < count ; i++) + wed->wlan.update_wo_rx_stats(wed, &stats[i]); +} + +void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo, + struct sk_buff *skb) +{ + struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data; + + skb_pull(skb, sizeof(*hdr)); + + switch (hdr->cmd) { + case MTK_WED_WO_EVT_LOG_DUMP: + dev_notice(wo->hw->dev, "%s\n", skb->data); + break; + case MTK_WED_WO_EVT_PROFILING: { + struct mtk_wed_wo_log_info *info = (void *)skb->data; + u32 count = skb->len / sizeof(*info); + int i; + + for (i = 0 ; i < count ; i++) + dev_notice(wo->hw->dev, + "SN:%u latency: total=%u, rro:%u, mod:%u\n", + le32_to_cpu(info[i].sn), + le32_to_cpu(info[i].total), + le32_to_cpu(info[i].rro), + le32_to_cpu(info[i].mod)); + break; + } + case MTK_WED_WO_EVT_RXCNT_INFO: + mtk_wed_update_rx_stats(wo->hw->wed_dev, skb); + break; + default: + break; + } + + dev_kfree_skb(skb); +} + +static int +mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb, + int id, int cmd, u16 *wait_seq, bool wait_resp) +{ + struct mtk_wed_mcu_hdr *hdr; + + /* TODO: make it dynamic based on cmd */ + wo->mcu.timeout = 20 * HZ; + + hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr)); + hdr->cmd = cmd; + hdr->length = cpu_to_le16(skb->len); + + if (wait_resp && wait_seq) { + u16 seq = ++wo->mcu.seq; + + if (!seq) + seq = ++wo->mcu.seq; + *wait_seq = seq; + + hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP); + hdr->seq = cpu_to_le16(seq); + } + if (id == MTK_WED_MODULE_ID_WO) + hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO); + + return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb); +} + +static int +mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb, + int cmd, int seq) +{ + struct mtk_wed_mcu_hdr *hdr; + + if (!skb) { + dev_err(wo->hw->dev, "Message 
%08x (seq %d) timeout\n", + cmd, seq); + return -ETIMEDOUT; + } + + hdr = (struct mtk_wed_mcu_hdr *)skb->data; + if (le16_to_cpu(hdr->seq) != seq) + return -EAGAIN; + + skb_pull(skb, sizeof(*hdr)); + switch (cmd) { + case MTK_WED_WO_CMD_RXCNT_INFO: + mtk_wed_update_rx_stats(wo->hw->wed_dev, skb); + break; + default: + break; + } + + return 0; +} + +int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd, + const void *data, int len, bool wait_resp) +{ + unsigned long expires; + struct sk_buff *skb; + u16 seq; + int ret; + + skb = mtk_wed_mcu_msg_alloc(data, len); + if (!skb) + return -ENOMEM; + + mutex_lock(&wo->mcu.mutex); + + ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp); + if (ret || !wait_resp) + goto unlock; + + expires = jiffies + wo->mcu.timeout; + do { + skb = mtk_wed_mcu_get_response(wo, expires); + ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq); + dev_kfree_skb(skb); + } while (ret == -EAGAIN); + +unlock: + mutex_unlock(&wo->mcu.mutex); + + return ret; +} + +int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data, + int len) +{ + struct mtk_wed_wo *wo = dev->hw->wed_wo; + + if (dev->hw->version == 1) + return 0; + + if (WARN_ON(!wo)) + return -ENODEV; + + return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len, + true); +} + +static int +mtk_wed_get_memory_region(struct mtk_wed_wo *wo, + struct mtk_wed_wo_memory_region *region) +{ + struct reserved_mem *rmem; + struct device_node *np; + int index; + + index = of_property_match_string(wo->hw->node, "memory-region-names", + region->name); + if (index < 0) + return index; + + np = of_parse_phandle(wo->hw->node, "memory-region", index); + if (!np) + return -ENODEV; + + rmem = of_reserved_mem_lookup(np); + of_node_put(np); + + if (!rmem) + return -ENODEV; + + region->phy_addr = rmem->base; + region->size = rmem->size; + region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size); + + return !region->addr ? 
-EINVAL : 0; +} + +static int +mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw, + struct mtk_wed_wo_memory_region *region) +{ + const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data; + const struct mtk_wed_fw_trailer *trailer; + const struct mtk_wed_fw_region *fw_region; + + trailer_ptr = fw->data + fw->size - sizeof(*trailer); + trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr; + region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region); + first_region_ptr = region_ptr; + + while (region_ptr < trailer_ptr) { + u32 length; + + fw_region = (const struct mtk_wed_fw_region *)region_ptr; + length = le32_to_cpu(fw_region->len); + + if (region->phy_addr != le32_to_cpu(fw_region->addr)) + goto next; + + if (region->size < length) + goto next; + + if (first_region_ptr < ptr + length) + goto next; + + if (region->shared && region->consumed) + return 0; + + if (!region->shared || !region->consumed) { + memcpy_toio(region->addr, ptr, length); + region->consumed = true; + return 0; + } +next: + region_ptr += sizeof(*fw_region); + ptr += length; + } + + return -EINVAL; +} + +static int +mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo) +{ + static struct mtk_wed_wo_memory_region mem_region[] = { + [MTK_WED_WO_REGION_EMI] = { + .name = "wo-emi", + }, + [MTK_WED_WO_REGION_ILM] = { + .name = "wo-ilm", + }, + [MTK_WED_WO_REGION_DATA] = { + .name = "wo-data", + .shared = true, + }, + }; + const struct mtk_wed_fw_trailer *trailer; + const struct firmware *fw; + const char *fw_name; + u32 val, boot_cr; + int ret, i; + + /* load firmware region metadata */ + for (i = 0; i < ARRAY_SIZE(mem_region); i++) { + ret = mtk_wed_get_memory_region(wo, &mem_region[i]); + if (ret) + return ret; + } + + wo->boot.name = "wo-boot"; + ret = mtk_wed_get_memory_region(wo, &wo->boot); + if (ret) + return ret; + + /* set dummy cr */ + wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL, + wo->hw->index + 1); + + /* load firmware */ + fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0; + ret = request_firmware(&fw, fw_name, wo->hw->dev); + if (ret) + return ret; + + trailer = (void *)(fw->data + fw->size - + sizeof(struct mtk_wed_fw_trailer)); + dev_info(wo->hw->dev, + "MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n", + trailer->fw_ver, trailer->build_date); + dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n", + trailer->chip_id, trailer->num_region); + + for (i = 0; i < ARRAY_SIZE(mem_region); i++) { + ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]); + if (ret) + goto out; + } + + /* set the start address */ + boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR + : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR; + wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16); + /* wo firmware reset */ + wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00); + + val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR); + val |= wo->hw->index ? 
MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK + : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK; + wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val); +out: + release_firmware(fw); + + return ret; +} + +static u32 +mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo) +{ + return wed_r32(wo->hw->wed_dev, + MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL); +} + +int mtk_wed_mcu_init(struct mtk_wed_wo *wo) +{ + u32 val; + int ret; + + skb_queue_head_init(&wo->mcu.res_q); + init_waitqueue_head(&wo->mcu.wait); + mutex_init(&wo->mcu.mutex); + + ret = mtk_wed_mcu_load_firmware(wo); + if (ret) + return ret; + + return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val, + 100, MTK_FW_DL_TIMEOUT); +} + +MODULE_FIRMWARE(MT7986_FIRMWARE_WO0); +MODULE_FIRMWARE(MT7986_FIRMWARE_WO1); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h index e270fb336143..0a50bb98c5ea 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h @@ -4,6 +4,7 @@ #ifndef __MTK_WED_REGS_H #define __MTK_WED_REGS_H +#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8) #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) #define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0) #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) @@ -19,15 +20,23 @@ struct mtk_wdma_desc { __le32 info; } __packed __aligned(4); +#define MTK_WED_REV_ID 0x004 + #define MTK_WED_RESET 0x008 #define MTK_WED_RESET_TX_BM BIT(0) +#define MTK_WED_RESET_RX_BM BIT(1) #define MTK_WED_RESET_TX_FREE_AGENT BIT(4) #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) +#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10) #define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) #define MTK_WED_RESET_WED_TX_DMA BIT(12) +#define MTK_WED_RESET_WED_RX_DMA BIT(13) +#define MTK_WED_RESET_WDMA_TX_DRV BIT(16) #define MTK_WED_RESET_WDMA_RX_DRV BIT(17) #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) +#define MTK_WED_RESET_RX_RRO_QM BIT(20) +#define MTK_WED_RESET_RX_ROUTE_QM BIT(21) #define MTK_WED_RESET_WED BIT(31) #define MTK_WED_CTRL 0x00c @@ -39,8 +48,12 @@ struct mtk_wdma_desc { #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) #define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) -#define MTK_WED_CTRL_RESERVE_EN BIT(12) -#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) +#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12) +#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13) +#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14) +#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15) +#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16) +#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17) #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) @@ -62,6 +75,9 @@ struct mtk_wdma_desc { #define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22) #define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23) #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_GET_BM_DMAD_SKIP BIT(25) +#define MTK_WED_EXT_INT_STATUS_WPDMA_RX_D_DRV_ERR BIT(26) +#define MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY BIT(27) #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ @@ -71,6 +87,8 @@ struct mtk_wdma_desc { MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR) #define MTK_WED_EXT_INT_MASK 0x028 +#define MTK_WED_EXT_INT_MASK1 0x02c +#define MTK_WED_EXT_INT_MASK2 0x030 #define MTK_WED_STATUS 0x060 #define MTK_WED_STATUS_TX GENMASK(15, 8) @@ 
-144,6 +162,8 @@ struct mtk_wdma_desc { #define MTK_WED_RESET_IDX 0x20c #define MTK_WED_RESET_IDX_TX GENMASK(3, 0) #define MTK_WED_RESET_IDX_RX GENMASK(17, 16) +#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6) +#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30) #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) #define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4) @@ -151,7 +171,9 @@ struct mtk_wdma_desc { #define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) #define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) +#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10) +#define MTK_WED_SCR0 0x3c0 #define MTK_WED_WPDMA_INT_TRIGGER 0x504 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) @@ -212,6 +234,12 @@ struct mtk_wdma_desc { #define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10) #define MTK_WED_WPDMA_INT_CTRL_RX 0x534 +#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0) +#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1) +#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2) +#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8) +#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9) +#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10) #define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538 #define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0) @@ -241,11 +269,37 @@ struct mtk_wdma_desc { #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) +#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10) + +#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c +#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0) +#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1) +#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3) +#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4) +#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7) +#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24) + +#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760 +#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16) +#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24) + +#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c +#define MTK_WED_WPDMA_RX_RING 0x770 + +#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4) +#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4) +#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c + +#define MTK_WED_WDMA_RING_TX 0x800 + +#define MTK_WED_WDMA_TX_MIB 0x810 + #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) #define MTK_WED_WDMA_GLO_CFG 0xa04 #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) +#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1) #define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) #define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) #define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) @@ -290,6 +344,20 @@ struct mtk_wdma_desc { #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) +#define MTK_WED_RX_BM_RX_DMAD 0xd80 +#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0) + +#define MTK_WED_RX_BM_BASE 0xd84 +#define MTK_WED_RX_BM_INIT_PTR 0xd88 +#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0) +#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16) + +#define MTK_WED_RX_PTR 0xd8c + +#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4 +#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16) +#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0) + #define MTK_WED_RING_OFS_BASE 0x00 #define MTK_WED_RING_OFS_COUNT 0x04 #define MTK_WED_RING_OFS_CPU_IDX 0x08 @@ -300,7 +368,9 @@ struct mtk_wdma_desc { #define MTK_WDMA_GLO_CFG 0x204 #define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0) 
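/* [annotation, not part of the patch]
 * The multi-bit fields added in this header (e.g. MTK_WED_RX_BM_RX_DMAD_SDL0,
 * MTK_WED_WPDMA_RX_D_RXD_READ_LEN) are GENMASK() definitions that mtk_wed.c
 * packs and unpacks with FIELD_PREP()/FIELD_GET(). A short sketch of that
 * pattern, using a made-up EXAMPLE_* layout rather than a real WED register:
 */
#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_CFG_SDL0	GENMASK(13, 0)	/* packet buffer length */
#define EXAMPLE_CFG_EN		BIT(31)

static u32 example_cfg_pack(u32 buf_len)
{
	/* FIELD_PREP() shifts the value into bits 13..0 of the register word */
	return EXAMPLE_CFG_EN | FIELD_PREP(EXAMPLE_CFG_SDL0, buf_len);
}

static u32 example_cfg_buf_len(u32 regval)
{
	/* FIELD_GET() masks and right-shifts the same field back out */
	return FIELD_GET(EXAMPLE_CFG_SDL0, regval);
}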
+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1) #define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3) #define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26) #define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27) #define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28) @@ -329,4 +399,70 @@ struct mtk_wdma_desc { /* DMA channel mapping */ #define HIFSYS_DMA_AG_MAP 0x008 +#define MTK_WED_RTQM_GLO_CFG 0xb00 +#define MTK_WED_RTQM_BUSY BIT(1) +#define MTK_WED_RTQM_Q_RST BIT(2) +#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5) +#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20) + +#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4) +#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4) +#define MTK_WED_RTQM_Q2N_MIB 0xb80 +#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4) + +#define MTK_WED_RTQM_Q2B_MIB 0xb8c +#define MTK_WED_RTQM_PFDBK_MIB 0xb90 + +#define MTK_WED_RROQM_GLO_CFG 0xc04 +#define MTK_WED_RROQM_RST_IDX 0xc08 +#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0) +#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4) + +#define MTK_WED_RROQM_MIOD_CTRL0 0xc40 +#define MTK_WED_RROQM_MIOD_CTRL1 0xc44 +#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0) + +#define MTK_WED_RROQM_MIOD_CTRL2 0xc48 +#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c + +#define MTK_WED_RROQM_FDBK_CTRL0 0xc50 +#define MTK_WED_RROQM_FDBK_CTRL1 0xc54 +#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0) + +#define MTK_WED_RROQM_FDBK_CTRL2 0xc58 + +#define MTK_WED_RROQ_BASE_L 0xc80 +#define MTK_WED_RROQ_BASE_H 0xc84 + +#define MTK_WED_RROQM_MIOD_CFG 0xc8c +#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0) +#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8) +#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16) + +#define MTK_WED_RROQM_MID_MIB 0xcc0 +#define MTK_WED_RROQM_MOD_MIB 0xcc4 +#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8 +#define MTK_WED_RROQM_FDBK_MIB 0xcd0 +#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4 +#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0 +#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4 +#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8 +#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec + +#define MTK_WED_RX_BM_RX_DMAD 0xd80 +#define MTK_WED_RX_BM_BASE 0xd84 +#define MTK_WED_RX_BM_INIT_PTR 0xd88 +#define MTK_WED_RX_BM_PTR 0xd8c +#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16) +#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0) + +#define MTK_WED_RX_BM_BLEN 0xd90 +#define MTK_WED_RX_BM_STS 0xd94 +#define MTK_WED_RX_BM_INTF2 0xd98 +#define MTK_WED_RX_BM_INTF 0xd9c +#define MTK_WED_RX_BM_ERR_STS 0xda8 + +#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000 +#define MTK_WED_PCIE_INT_MASK 0x0 + #endif diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c new file mode 100644 index 000000000000..a0a39643caf7 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2022 MediaTek Inc. 
+ * + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + * Sujuan Chen <sujuan.chen@mediatek.com> + */ + +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/of_platform.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/mfd/syscon.h> +#include <linux/of_irq.h> +#include <linux/bitfield.h> + +#include "mtk_wed.h" +#include "mtk_wed_regs.h" +#include "mtk_wed_wo.h" + +static u32 +mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg) +{ + u32 val; + + if (regmap_read(wo->mmio.regs, reg, &val)) + val = ~0; + + return val; +} + +static void +mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val) +{ + regmap_write(wo->mmio.regs, reg, val); +} + +static u32 +mtk_wed_wo_get_isr(struct mtk_wed_wo *wo) +{ + u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM); + + return val & MTK_WED_WO_CCIF_RCHNUM_MASK; +} + +static void +mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask) +{ + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask); +} + +static void +mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask) +{ + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask); +} + +static void +mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set) +{ + unsigned long flags; + + spin_lock_irqsave(&wo->mmio.lock, flags); + wo->mmio.irq_mask &= ~mask; + wo->mmio.irq_mask |= val; + if (set) + mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask); + spin_unlock_irqrestore(&wo->mmio.lock, flags); +} + +static void +mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask) +{ + mtk_wed_wo_set_isr_mask(wo, 0, mask, false); + tasklet_schedule(&wo->mmio.irq_tasklet); +} + +static void +mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask) +{ + mtk_wed_wo_set_isr_mask(wo, mask, 0, true); +} + +static void +mtk_wed_wo_kickout(struct mtk_wed_wo *wo) +{ + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM); + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM); +} + +static void +mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, + u32 val) +{ + wmb(); + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val); +} + +static void * +mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len, + bool flush) +{ + int buf_len = SKB_WITH_OVERHEAD(q->buf_size); + int index = (q->tail + 1) % q->n_desc; + struct mtk_wed_wo_queue_entry *entry; + struct mtk_wed_wo_queue_desc *desc; + void *buf; + + if (!q->queued) + return NULL; + + if (flush) + q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE); + else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE))) + return NULL; + + q->tail = index; + q->queued--; + + desc = &q->desc[index]; + entry = &q->entry[index]; + buf = entry->buf; + if (len) + *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0, + le32_to_cpu(READ_ONCE(desc->ctrl))); + if (buf) + dma_unmap_single(wo->hw->dev, entry->addr, buf_len, + DMA_FROM_DEVICE); + entry->buf = NULL; + + return buf; +} + +static int +mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, + bool rx) +{ + enum dma_data_direction dir = rx ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	int n_buf = 0;
+
+	spin_lock_bh(&q->lock);
+	while (q->queued < q->n_desc) {
+		struct mtk_wed_wo_queue_entry *entry;
+		dma_addr_t addr;
+		void *buf;
+
+		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
+		if (!buf)
+			break;
+
+		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		q->head = (q->head + 1) % q->n_desc;
+		entry = &q->entry[q->head];
+		entry->addr = addr;
+		entry->len = q->buf_size;
+		q->entry[q->head].buf = buf;
+
+		if (rx) {
+			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+					      entry->len);
+
+			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+		}
+		q->queued++;
+		n_buf++;
+	}
+	spin_unlock_bh(&q->lock);
+
+	return n_buf;
+}
+
+static void
+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
+{
+	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static void
+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	for (;;) {
+		struct mtk_wed_mcu_hdr *hdr;
+		struct sk_buff *skb;
+		void *data;
+		u32 len;
+
+		data = mtk_wed_wo_dequeue(wo, q, &len, false);
+		if (!data)
+			break;
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+
+		__skb_put(skb, len);
+		if (mtk_wed_mcu_check_msg(wo, skb)) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+			mtk_wed_mcu_rx_event(wo, skb);
+		else
+			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+	}
+
+	if (mtk_wed_wo_queue_refill(wo, q, true)) {
+		u32 index = (q->head - 1) % q->n_desc;
+
+		mtk_wed_wo_queue_kick(wo, q, index);
+	}
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+	struct mtk_wed_wo *wo = data;
+
+	mtk_wed_wo_set_isr(wo, 0);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+	u32 intr, mask;
+
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	intr = mtk_wed_wo_get_isr(wo);
+	intr &= wo->mmio.irq_mask;
+	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+	mtk_wed_wo_irq_disable(wo, mask);
+
+	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
+		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
+		mtk_wed_wo_rx_complete(wo);
+	}
+}
+
+/* mtk wed wo hw queues */
+
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		       int n_desc, int buf_size, int index,
+		       struct mtk_wed_wo_queue_regs *regs)
+{
+	spin_lock_init(&q->lock);
+	q->regs = *regs;
+	q->n_desc = n_desc;
+	q->buf_size = buf_size;
+
+	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+				      &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+			  q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+	int i;
+
+	spin_lock_bh(&q->lock);
+	for (i = 0; i < q->n_desc; i++) {
+		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+				 DMA_TO_DEVICE);
+		skb_free_frag(entry->buf);
+		entry->buf = NULL;
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+
+	spin_lock_bh(&q->lock);
+	for (;;) {
+		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb)
+{
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	int ret = 0, index;
+	u32 ctrl;
+
+	spin_lock_bh(&q->lock);
+
+	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+	index = (q->head + 1) % q->n_desc;
+	if (q->tail == index) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	entry = &q->entry[index];
+	if (skb->len > entry->len) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	desc = &q->desc[index];
+	q->head = index;
+
+	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+				DMA_TO_DEVICE);
+	memcpy(entry->buf, skb->data, skb->len);
+	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+				   DMA_TO_DEVICE);
+
+	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+	mtk_wed_wo_queue_kick(wo, q, q->head);
+	mtk_wed_wo_kickout(wo);
+out:
+	spin_unlock_bh(&q->lock);
+
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+	return 0;
+}
+
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+	struct mtk_wed_wo_queue_regs regs;
+	struct device_node *np;
+	int ret;
+
+	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+	if (!np)
+		return -ENODEV;
+
+	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+	if (IS_ERR(wo->mmio.regs)) {
+		ret = PTR_ERR(wo->mmio.regs);
+		goto error_put;
+	}
+
+	wo->mmio.irq = irq_of_parse_and_map(np, 0);
+	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+	spin_lock_init(&wo->mmio.lock);
+	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+			       KBUILD_MODNAME, wo);
+	if (ret)
+		goto error;
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
+	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
+	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+
+	/* rx queue irqmask */
+	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+	return 0;
+
+error:
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+error_put:
+	of_node_put(np);
+	return ret;
+}
+
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	tasklet_disable(&wo->mmio.irq_tasklet);
+
+	disable_irq(wo->mmio.irq);
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+	mtk_wed_wo_queue_free(wo, &wo->q_tx);
+	mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo;
+	int ret;
+
+	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+	if (!wo)
+		return -ENOMEM;
+
+	hw->wed_wo = wo;
+	wo->hw = hw;
+
+	ret = mtk_wed_wo_hardware_init(wo);
+	if (ret)
+		return ret;
+
+	ret = mtk_wed_mcu_init(wo);
+	if (ret)
+		return ret;
+
+	return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo = hw->wed_wo;
+
+	mtk_wed_wo_hw_deinit(wo);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
new file mode 100644
index 000000000000..c8fb85795864
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2022 Lorenzo Bianconi <lorenzo@kernel.org> */
+
+#ifndef __MTK_WED_WO_H
+#define __MTK_WED_WO_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+struct mtk_wed_hw;
+
+struct mtk_wed_mcu_hdr {
+	/* DW0 */
+	u8 version;
+	u8 cmd;
+	__le16 length;
+
+	/* DW1 */
+	__le16 seq;
+	__le16 flag;
+
+	/* DW2 */
+	__le32 status;
+
+	/* DW3 */
+	u8 rsv[20];
+};
+
+struct mtk_wed_wo_log_info {
+	__le32 sn;
+	__le32 total;
+	__le32 rro;
+	__le32 mod;
+};
+
+enum mtk_wed_wo_event {
+	MTK_WED_WO_EVT_LOG_DUMP = 0x1,
+	MTK_WED_WO_EVT_PROFILING = 0x2,
+	MTK_WED_WO_EVT_RXCNT_INFO = 0x3,
+};
+
+#define MTK_WED_MODULE_ID_WO		1
+#define MTK_FW_DL_TIMEOUT		4000000 /* us */
+#define MTK_WOCPU_TIMEOUT		2000000 /* us */
+
+enum {
+	MTK_WED_WARP_CMD_FLAG_RSP		= BIT(0),
+	MTK_WED_WARP_CMD_FLAG_NEED_RSP		= BIT(1),
+	MTK_WED_WARP_CMD_FLAG_FROM_TO_WO	= BIT(2),
+};
+
+#define MTK_WED_WO_CPU_MCUSYS_RESET_ADDR	0x15194050
+#define MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK	0x20
+#define MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK	0x1
+
+enum {
+	MTK_WED_WO_REGION_EMI,
+	MTK_WED_WO_REGION_ILM,
+	MTK_WED_WO_REGION_DATA,
+	MTK_WED_WO_REGION_BOOT,
+	__MTK_WED_WO_REGION_MAX,
+};
+
+enum mtk_wed_wo_state {
+	MTK_WED_WO_STATE_UNDEFINED,
+	MTK_WED_WO_STATE_INIT,
+	MTK_WED_WO_STATE_ENABLE,
+	MTK_WED_WO_STATE_DISABLE,
+	MTK_WED_WO_STATE_HALT,
+	MTK_WED_WO_STATE_GATING,
+	MTK_WED_WO_STATE_SER_RESET,
+	MTK_WED_WO_STATE_WF_RESET,
+};
+
+enum mtk_wed_wo_done_state {
+	MTK_WED_WOIF_UNDEFINED,
+	MTK_WED_WOIF_DISABLE_DONE,
+	MTK_WED_WOIF_TRIGGER_ENABLE,
+	MTK_WED_WOIF_ENABLE_DONE,
+	MTK_WED_WOIF_TRIGGER_GATING,
+	MTK_WED_WOIF_GATING_DONE,
+	MTK_WED_WOIF_TRIGGER_HALT,
+	MTK_WED_WOIF_HALT_DONE,
+};
+
+enum mtk_wed_dummy_cr_idx {
+	MTK_WED_DUMMY_CR_FWDL,
+	MTK_WED_DUMMY_CR_WO_STATUS,
+};
+
+#define MT7986_FIRMWARE_WO0	"mediatek/mt7986_wo_0.bin"
+#define MT7986_FIRMWARE_WO1	"mediatek/mt7986_wo_1.bin"
+
+#define MTK_WO_MCU_CFG_LS_BASE				0
+#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x000)
+#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x004)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x00c)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x010)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR			(MTK_WO_MCU_CFG_LS_BASE + 0x014)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x018)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x01c)
+#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x050)
+#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x060)
+#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR		(MTK_WO_MCU_CFG_LS_BASE + 0x064)
+
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK	BIT(5)
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK	BIT(0)
+
+#define MTK_WED_WO_RING_SIZE	256
+#define MTK_WED_WO_CMD_LEN	1504
+
+#define MTK_WED_WO_TXCH_NUM		0
+#define MTK_WED_WO_RXCH_NUM		1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION	7
+
+#define MTK_WED_WO_TXCH_INT_MASK	BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK	BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK	BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK		(MTK_WED_WO_RXCH_INT_MASK | \
+					 MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY		0x004
+#define MTK_WED_WO_CCIF_START		0x008
+#define MTK_WED_WO_CCIF_TCHNUM		0x00c
+#define MTK_WED_WO_CCIF_RCHNUM		0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK	GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK		0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK	0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK	0x01c
+#define MTK_WED_WO_CCIF_DUMMY1		0x020
+#define MTK_WED_WO_CCIF_DUMMY2		0x024
+#define MTK_WED_WO_CCIF_DUMMY3		0x028
+#define MTK_WED_WO_CCIF_DUMMY4		0x02c
+#define MTK_WED_WO_CCIF_SHADOW1		0x030
+#define MTK_WED_WO_CCIF_SHADOW2		0x034
+#define MTK_WED_WO_CCIF_SHADOW3		0x038
+#define MTK_WED_WO_CCIF_SHADOW4		0x03c
+#define MTK_WED_WO_CCIF_DUMMY5		0x050
+#define MTK_WED_WO_CCIF_DUMMY6		0x054
+#define MTK_WED_WO_CCIF_DUMMY7		0x058
+#define MTK_WED_WO_CCIF_DUMMY8		0x05c
+#define MTK_WED_WO_CCIF_SHADOW5		0x060
+#define MTK_WED_WO_CCIF_SHADOW6		0x064
+#define MTK_WED_WO_CCIF_SHADOW7		0x068
+#define MTK_WED_WO_CCIF_SHADOW8		0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1		GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1	BIT(14)
+#define MTK_WED_WO_CTL_BURST		BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT	16
+#define MTK_WED_WO_CTL_SD_LEN0		GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0	BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE		BIT(31)
+#define MTK_WED_WO_INFO_WINFO		GENMASK(15, 0)
+
+struct mtk_wed_wo_memory_region {
+	const char *name;
+	void __iomem *addr;
+	phys_addr_t phy_addr;
+	u32 size;
+	bool shared:1;
+	bool consumed:1;
+};
+
+struct mtk_wed_fw_region {
+	__le32 decomp_crc;
+	__le32 decomp_len;
+	__le32 decomp_blk_sz;
+	u8 rsv0[4];
+	__le32 addr;
+	__le32 len;
+	u8 feature_set;
+	u8 rsv1[15];
+} __packed;
+
+struct mtk_wed_fw_trailer {
+	u8 chip_id;
+	u8 eco_code;
+	u8 num_region;
+	u8 format_ver;
+	u8 format_flag;
+	u8 rsv[2];
+	char fw_ver[10];
+	char build_date[15];
+	u32 crc;
+};
+
+struct mtk_wed_wo_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+};
+
+struct mtk_wed_wo_queue_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+	__le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+	dma_addr_t addr;
+	void *buf;
+	u32 len;
+};
+
+struct mtk_wed_wo_queue {
+	struct mtk_wed_wo_queue_regs regs;
+
+	struct page_frag_cache cache;
+	spinlock_t lock;
+
+	struct mtk_wed_wo_queue_desc *desc;
+	dma_addr_t desc_dma;
+
+	struct mtk_wed_wo_queue_entry *entry;
+
+	u16 head;
+	u16 tail;
+	int n_desc;
+	int queued;
+	int buf_size;
+
+};
+
+struct mtk_wed_wo {
+	struct mtk_wed_hw *hw;
+	struct mtk_wed_wo_memory_region boot;
+
+	struct mtk_wed_wo_queue q_tx;
+	struct mtk_wed_wo_queue q_rx;
+
+	struct {
+		struct mutex mutex;
+		int timeout;
+		u16 seq;
+
+		struct sk_buff_head res_q;
+		wait_queue_head_t wait;
+	} mcu;
+
+	struct {
+		struct regmap *regs;
+
+		spinlock_t lock;
+		struct tasklet_struct irq_tasklet;
+		int irq;
+		u32 irq_mask;
+	} mmio;
+};
+
+static inline int
+mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+	struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+	if (hdr->version)
+		return -EINVAL;
+
+	if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length))
+		return -EINVAL;
+
+	return 0;
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+				      struct sk_buff *skb);
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+			 const void *data, int len, bool wait_resp);
+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
+			   int len);
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb);
+
+#endif /* __MTK_WED_WO_H */
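
Usage note (not part of the patch): a minimal sketch of how a command could be composed against the mtk_wed_mcu_hdr layout declared above and handed to mtk_wed_wo_queue_tx_skb(). The MCU layer that actually implements mtk_wed_mcu_send_msg() is not shown in this hunk, so the helper name wo_send_cmd_sketch(), the GFP flag and the omitted sequence/locking handling below are illustrative assumptions, not the driver's real code path.

static int wo_send_cmd_sketch(struct mtk_wed_wo *wo, int cmd,
			      const void *data, int len, bool wait_resp)
{
	struct mtk_wed_mcu_hdr *hdr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*hdr) + len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* the 32-byte control header precedes the payload on the WO TX ring */
	hdr = (struct mtk_wed_mcu_hdr *)skb_put_zero(skb, sizeof(*hdr));
	hdr->cmd = cmd;
	hdr->length = cpu_to_le16(sizeof(*hdr) + len);
	hdr->flag = cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO |
				(wait_resp ? MTK_WED_WARP_CMD_FLAG_NEED_RSP : 0));
	skb_put_data(skb, data, len);

	/* copies skb->data into a pre-mapped ring buffer, kicks the WO CPU
	 * and frees the skb regardless of the outcome
	 */
	return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
}

On the receive side, mtk_wed_wo_rx_run_queue() treats frames carrying MTK_WED_WARP_CMD_FLAG_RSP as awaited responses (mtk_wed_mcu_rx_event()) and routes everything else to mtk_wed_mcu_rx_unsolicited_event(), e.g. firmware log dumps and rx counter updates from the mtk_wed_wo_event ids above.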